column           type             lengths / values
query            stringlengths    9 .. 3.4k
document         stringlengths    9 .. 87.4k
metadata         dict
negatives        sequencelengths  4 .. 101
negative_scores  sequencelengths  4 .. 101
document_score   stringlengths    3 .. 10
document_rank    stringclasses    102 values
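Each record pairs a natural-language query with a positive code document, a list of negative code snippets, a similarity score for each negative, the positive document's own score, and its rank among the negatives. A minimal sketch of iterating such records, assuming the data is available as JSON Lines (the file name used here is hypothetical):

import json

# Hypothetical file name; each line is one record with the fields listed above.
with open("code_retrieval.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        query = row["query"]                      # natural-language description
        document = row["document"]                # positive code snippet
        negatives = row["negatives"]              # list of non-matching snippets
        negative_scores = row["negative_scores"]  # one similarity score per negative
        doc_score = float(row["document_score"])  # stored as a string in the schema
        doc_rank = int(row["document_rank"])      # rank of the positive among the negatives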
Used to hide the actual prefix dictionary.
def _getPrefixDict(self):
    if not hasattr(self, '_prefixDict'):
        self.__prefixDict = {}
    return self.__prefixDict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _resetPrefixDict(self):\r\n self._getPrefixDict().clear()", "def remove_prefix(self, state_dict, prefix):\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x # 去除带有prefix的名字\n return {f(key): value for key, value in state_dict.items()}", "def _diagnose_prefixes(self):\n return set()", "def remove_prefix(self, state_dict, prefix):\n return {\n (lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x)(\n key\n ): value\n for key, value in state_dict.items()\n }", "def prefix(self):\n return self[\"prefix\"]", "def prefix(self):\n return self[\"prefix\"]", "def ikeys(self, prefix=''):", "def empty_prefix(self):\r\n raise NotImplementedError()", "def _temp_prefix(cls) -> str:\n pass", "def default_prefix(self) -> str:", "def _extra_keys(self):\r\n return []", "def add_prefix(self, state_dict, prefix):\n print('add prefix \\'{}\\''.format(prefix))\n f = lambda x: x + prefix # 去除带有prefix的名字\n return {f(key): value for key, value in state_dict.items()}", "def _init_prefix(self):\n self._.prefix = \"v%x\" % (hash(self) % Integer(2)**32)", "def exclude_from_prefixing(self, inp):\n raise NotImplementedError", "def default_prefix(self) -> str:\n return \"\"", "def default_prefix(self) -> str:\n return \"\"", "def set_prefix(prefix):\n PLUGINS.set_prefix(prefix)", "def hotkey_prefix(self, hotkey_prefix):\n allowed_values = [\"None\", \"Show\", \"Hide\"] # noqa: E501\n if not hotkey_prefix.isdigit():\t\n if hotkey_prefix not in allowed_values:\n raise ValueError(\n \"Invalid value for `hotkey_prefix` ({0}), must be one of {1}\" # noqa: E501\n .format(hotkey_prefix, allowed_values))\n self._hotkey_prefix = hotkey_prefix\n else:\n self._hotkey_prefix = allowed_values[int(hotkey_prefix) if six.PY3 else long(hotkey_prefix)]", "def filter_dict_keystartswith(d, prefix):\n if d is None or isinstance(d, Undefined):\n return d\n\n if sys.version_info[0] < 3:\n return [v for k, v in d.viewitems() if k.startswith(prefix)]\n else:\n return [v for k, v in d.items() if k.startswith(prefix)]", "def llm_prefix(self) -> str:\n return \"Thought:\"", "def _ensure_prefix_is_set(chunk_info, telstate):\n for info in chunk_info.values():\n if 'prefix' not in info:\n info['prefix'] = telstate['chunk_name']\n return chunk_info", "def test_ipam_prefixes_delete(self):\n pass", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def _get_object_properties(self):\n super()._get_object_properties()\n add_prefix(root=self.root, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def get_var_prefix(self):\n return ''", "def _get_prefix_attributes(self, prefix):\n return filter_dict_by_prefix(self.__dict__, prefix)", "def getPrefix(self):\n return \"20gig\"", "def add_prefix(dic, prefix):\n return {f'{prefix}_{key}': item for key, item in dic.items()}", "def key_not_starts_with(self, key_not_starts_with):\n\n self._key_not_starts_with = key_not_starts_with", "def test_prefix(self):\n self.chck_triple('prefix')", "def entity_prefix(self):", "def get_prefix(self):\n return self.prefix", "async def prefix(self, ctx, prefix):\n # Get the server language\n lang = getLang(ctx.message.guild.id)\n\n if len(prefix) > 10:\n with open(f\"embeds/{lang}/prefix.json\", \"r\") as f:\n await ctx.reply(embed=discord.Embed.from_dict(json.load(f)['len-error']), delete_after=20)\n\n # Change prefix\n with open('serverconfig/prefixes.json', 'r') as f:\n prefixes = json.load(f)\n 
old_prefix = prefixes[str(ctx.guild.id)]\n prefixes[str(ctx.guild.id)] = prefix\n with open('serverconfig/prefixes.json', 'w') as f:\n json.dump(prefixes, f, indent=4)\n\n # Get the embed of the right language and send with replaced variable\n with open(f\"embeds/{lang}/prefix.json\", \"r\") as f:\n embed = json.load(f)['embed']\n\n embed['description'] = embed['description'].replace(\"%VAR\", prefix)\n await ctx.reply(embed=discord.Embed.from_dict(embed), mention_author=False, delete_after=20)", "def rawkeys(self):\n k = {}\n for key, names in self.search_names.items():\n for name in names:\n if name in self.channel_names:\n k[key] = name\n break\n\n assert 'Current' in k\n assert 'Bias' in k\n assert self.LI in k\n\n k['LI'] = k[self.LI]\n\n return SimpleNamespace(**k)", "def removeprefix(self, x) -> String:\n pass", "def _shorten_key(telstate, key):\n for prefix in telstate.prefixes:\n if key.startswith(prefix):\n return key[len(prefix):]\n return ''", "def _makeInternalIdentifier(self, prefix, key):\n\n return '_:' + hashlib.sha1(\n ('fb'+prefix+'key'+key).encode('utf-8')).hexdigest()[1:20]", "def _prefixed(nt: namedtuple, prefix):\n result = {}\n for key, value in nt._asdict().items():\n result[prefix + key] = value\n return result", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self, group):\n return", "def prefixes(self):\n # a new OntCuries-like object that wraps NamespaceManager\n # and can leverage its trie\n self.namespace_manager\n raise NotImplementedError('yet')", "def realkeys(self):\n\n return filter(lambda s: s[:1] != \"~\", self.keys())", "def filter_by_key_prefix(dictionary, prefix, remove_prefix=True):\n return_dictionary = {}\n for key in dictionary:\n if key.startswith(prefix):\n return_key = key[len(prefix):] if remove_prefix else key\n return_dictionary[return_key] = copy.deepcopy(dictionary[key])\n return return_dictionary", "def gettempprefix():\n\tpass", "def prefix_info(self,prefix_id):\n \n url = self.BASE_URL + 'prefixes/' + prefix_id\n \n return self._make_get_request(url,models.prefix_single)", "def prefix(self, prefix):\n\n self._prefix = prefix", "def prefix(self, prefix):\n\n self._prefix = prefix", "def eliminate_key (self,key):\r\n\r\n if self.using_shelf:\r\n\r\n del self.key_dict[str(key)]", "def removePrefix(self,text, prefix):\n\n return text[text.startswith(prefix) and len(prefix):]", "def _get_prefix(obj):\n return obj._prefix if obj._prefix is not PREFIX_NOT_SET else DEFAULT_PREFIX", "def unprefix(curie):\n parts = curie.split(\":\", 1)\n if len(parts) > 1 and parts[0] in PREFIXES:\n return PREFIXES.get(parts[0]) + parts[1]\n else:\n return curie", "def removeAllKeys(self) -> None:\n ...", "def harmonize_keys(self):\n self._data.key_regex_replacements = _key_regex_replacements\n self._data.key_replacements = _key_replacements", "def stillLookingForPrefix(self, prefix):\n return prefix in self._prefixToIdentifiers", "def getPrefix(self):\n raise NotImplementedError", "def filter_dic_by_key_prefix(dic,key_prefix_list):\n new_dic = {}\n for key in dic:\n retain = True\n for prefix in key_prefix_list:\n if key.startswith(prefix):\n retain = False\n if retain:\n new_dic[key] = dic[key]\n return new_dic", "def without_prefix(string, prefix):\n assert string.startswith(prefix)\n return string[len(prefix):]", "def __repr__(self):\n orig = 
self.__dict__\n di = {}\n for key, value in orig.items():\n if not key.startswith(\"_\"):\n di[key] = value\n return f\"{self.__class__.__name__}({str(di)})\"", "def set_prefix(self, prefix):\n self._prefix = prefix\n self._update_layout()", "def prefix(self):\n if len(self.desc) > 0:\n return self.desc + \" \"\n\n return \"\"", "def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']", "def get_prefix(self):\n return self._prefix", "def get_prefix(self):\n return self._prefix", "def filter_content_keys(obj: Dict[Any, Any]) -> Dict[Any, Any]:\n return {k: v for k, v in obj.items() if not k.startswith(\"__\")}", "def undotted_keys(dict):\n return {k.lstrip(\".\"): v for k, v in dict.items()}", "def _hide_labels(self):\n pass", "def test_extra_prefixes(self) -> None:\n lib = hammer_tech.library_from_json('{\"openaccess techfile\": \"test/oa\"}') # type: hammer_tech.Library\n\n prefixes_orig = [hammer_tech.PathPrefix(prefix=\"test\", path=\"/tmp/test\")]\n\n prefixes = [hammer_tech.PathPrefix(prefix=\"test\", path=\"/tmp/test\")]\n lib.extra_prefixes = prefixes\n # Check that we get the original back even after mutating the original list.\n prefixes.append(hammer_tech.PathPrefix(prefix=\"bar\", path=\"/tmp/bar\"))\n self.assertEqual(lib.extra_prefixes, prefixes_orig)\n\n prefixes2 = lib.extra_prefixes\n # Check that we don't mutate the copy stored in the lib if we mutate after getting it\n prefixes2.append(hammer_tech.PathPrefix(prefix=\"bar\", path=\"/tmp/bar\"))\n self.assertEqual(lib.extra_prefixes, prefixes_orig)", "def resetPrefix(self):\n pp = self.rendererWindow.getCurrentPipelinePage()\n\n if pp is None:\n filename = \"\"\n\n else:\n filename = pp.filename\n\n guess = self.guessFilePrefix(filename)\n\n self.fileprefix.setText(guess)", "def _remove_prefix(self, path, prefix):\n expression = f\"_remove_prefix({repr(path)}, {repr(prefix)})\"\n return eval(expression, self.bzl_globals)", "def _pre_hook(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n):\n k = prefix + \"pe\"\n if k in state_dict:\n state_dict.pop(k)", "def test_ipam_prefixes_update(self):\n pass", "def safe_data(self):\r\n hide = ['_password', 'password', 'is_admin', 'api_key']\r\n return dict(\r\n [(k, v) for k, v in dict(self).iteritems() if k not in hide]\r\n )", "def _dump_prefix(guard: str) -> List[str]:\n\n return [\n f\"#ifndef {guard}\",\n f\"#define {guard}\",\n \"// <<< Use Configuration Wizard in Context Menu >>>\",\n \"#ifdef USE_APP_CONFIG\",\n '#include \"app_config.h\"',\n \"#endif\"\n ]", "def strip_local_prefix(self, stmt, qname):\n pref, colon, name = qname.partition(\":\")\n if colon and pref == stmt.i_module.i_prefix:\n return name\n else:\n return qname", "def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict", "def resource_prefix(self):", "def hq_lq_prefix_dict_pickle(self):\n return op.join(self.combined_dir, 'all.hq_lq_pre_dict.pickle')", "def __init__(self):\n super(sppasSeparatorSettings, self).__init__()\n self.__dict__ = dict(\n phonemes=\"-\", # X-SAMPA standard\n syllables=\".\", # X-SAMPA standard\n variants=\"|\" # used for all alternative tags\n )", "async def prefix(self, _bot, message: discord.Message):\n mention = [self.user.mention + ' ', f'<@!{self.user.id}> ']\n additional_prefixes = await self.get_prefixes(message.guild)\n return self.cfg['bot']['prefixes'] + mention + additional_prefixes", "def 
generate_rename_tabled(self, prefix):\n return \"#define %s%s g_symbol_table.%s\" % (prefix, self.__name, self.__name)", "def _show(node: dict, prefix=\"\"):\n print(prefix)\n for key, value in node.items():\n if key in {ITEMSKEY, SUFFIXKEY}:\n print(f\"{prefix}{key}: {value}\")\n else:\n _show(value, prefix + key)", "def _prefix(self):\n name = self.__class__.__name__\n return name[:2] + ''.join(c for c in name if c.isupper())[1:]", "def _fix_full(self, prefix):\n for k in self.params:\n if k.startswith(prefix):\n self.params[k].vary = False", "def dump(self):\n return dict([(k, v) for k, v in vars(self).items() if not k.startswith('_')])", "async def prefix(self, ctx, *, prefix=None):\n\n current = self.bot.prefix\n embed = Embed(\n title=\"Current prefix\", color=self.bot.main_color, description=f\"{current}\"\n )\n\n if prefix is None:\n await ctx.send(embed=embed)\n else:\n embed.title = \"Changed prefix!\"\n embed.description = f\"Set prefix to `{prefix}`\"\n self.bot.config[\"prefix\"] = prefix\n await self.bot.config.update()\n await ctx.send(embed=embed)", "def remove_prefix(z, prefix):\n if z.startswith(prefix):\n return re.sub(r\"^{}\".format(prefix), \"\", z)\n else:\n return z", "def generate_novel_prefix(self):\n return self.tree.generate_novel_prefix(self.random)", "def _field_prefix(self):\n if self.layer_name == 'geninfo':\n return ''\n return self.layer_name + '.'", "def _get_hash_prefixes(self):\n \n client_state = None\n\n self._get_threats_update()", "async def setprefix(self, ctx, *, prefix=bot_prefix):\n prefix = prefix.lower()\n current_server_prefix = await self.ex.get_server_prefix(ctx.guild.id)\n if len(prefix) > 8:\n await ctx.send(\"> **Your prefix can not be more than 8 characters.**\")\n else:\n # Default prefix '%' should never be in DB.\n if current_server_prefix == \"%\":\n if prefix != \"%\":\n await self.ex.conn.execute(\"INSERT INTO general.serverprefix VALUES ($1,$2)\", ctx.guild.id, prefix)\n self.ex.cache.server_prefixes[ctx.guild.id] = prefix\n else:\n if prefix != \"%\":\n await self.ex.conn.execute(\"UPDATE general.serverprefix SET prefix = $1 WHERE serverid = $2\",\n prefix, ctx.guild.id)\n self.ex.cache.server_prefixes[ctx.guild.id] = prefix\n else:\n await self.ex.conn.execute(\"DELETE FROM general.serverprefix WHERE serverid = $1\", ctx.guild.id)\n self.ex.cache.server_prefixes.pop(ctx.guild.id, None)\n await ctx.send(f\"> **This server's prefix has been set to {prefix}.**\")", "def sliceoffparams(pardict, parprefix):\n return {k: v.value for k, v in pardict.items() if not k.startswith(parprefix)}", "def ns_prefix_dict(g):\n return {ns: prefix.toPython() for (ns, prefix) in g.namespaces()}", "def strip_key_strings(po):\r\n newlist = [entry for entry in po if not is_key_string(entry.msgid)]\r\n del po[:]\r\n po += newlist", "def _strip_qt_binding_prefix(self, obj, data):\n parts = obj.__class__.__module__.split('.')\n if len(parts) > 1 and parts[1] == 'QtCore':\n prefix = '.'.join(parts[:2])\n data = data.replace(prefix, 'QtCore', 1)\n return data", "def keys(self):\n keys = [key for key in self.__dict__.keys() if self[key] is not None]\n keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']\n return keys", "def keys(self):\n keys = [key for key in self.__dict__.keys() if self[key] is not None]\n keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']\n return keys" ]
[ "0.6534732", "0.6454535", "0.6143842", "0.60910183", "0.5947643", "0.5947643", "0.58735013", "0.58210343", "0.57964146", "0.57501936", "0.57409424", "0.57313424", "0.5716517", "0.57021815", "0.5605354", "0.5605354", "0.5517104", "0.54289126", "0.54218256", "0.54174894", "0.5410633", "0.540663", "0.5398462", "0.5398462", "0.5398462", "0.5376509", "0.53738797", "0.53635305", "0.5361047", "0.53430855", "0.5325155", "0.5294545", "0.52904767", "0.52758104", "0.5249006", "0.5244029", "0.5228175", "0.52132946", "0.520963", "0.5194153", "0.51759106", "0.51759106", "0.51759106", "0.51714516", "0.51640236", "0.51545733", "0.51543003", "0.51492935", "0.5132725", "0.5125012", "0.5125012", "0.51201004", "0.5108913", "0.5108734", "0.5104862", "0.50924313", "0.50809264", "0.5080136", "0.507796", "0.507462", "0.5072659", "0.50622", "0.5054357", "0.5049928", "0.50453174", "0.5044855", "0.5044855", "0.50304544", "0.5027448", "0.5025239", "0.5018794", "0.5011129", "0.49991986", "0.49988636", "0.49910092", "0.49849096", "0.4983189", "0.49726203", "0.49710307", "0.49700734", "0.49677777", "0.49651566", "0.49649704", "0.49573016", "0.49562338", "0.49524018", "0.49520627", "0.4950785", "0.4948759", "0.4940762", "0.4936977", "0.4936727", "0.49322206", "0.49308527", "0.49299115", "0.49246556", "0.490642", "0.49039897", "0.49019012", "0.49019012" ]
0.62041974
2
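The scores above suggest how document_rank relates to the other fields: in this record two negatives (0.6534732 and 0.6454535) score above the positive document's 0.62041974, and the rank is 2. A small sketch of that inferred relationship (an assumption based on the records shown here, not a documented definition):

def document_rank(document_score, negative_scores):
    # Rank 0 means no negative outscores the positive document.
    return sum(1 for s in negative_scores if s > document_score)

# For the record above: two negatives exceed 0.62041974 -> rank 2.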
Clears the prefix dictionary, this needs to be done before creating a new typecode for a message (ie. before, and after creating a new message typecode)
def _resetPrefixDict(self):
    self._getPrefixDict().clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.footnotes = OrderedDict()\n self.unique_prefix += 1", "def remove_prefix(self, state_dict, prefix):\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x # 去除带有prefix的名字\n return {f(key): value for key, value in state_dict.items()}", "def reset (self):\n self.__inScopeNamespaces = self.__initialScopeNamespaces\n self.__inScopePrefixes = self.__initialScopePrefixes\n self.__mutableInScopeNamespaces = False\n self.__namespacePrefixCounter = 0", "def clear(self, prefix=PREFIX):\n for key in self.get_keys():\n # delete files in folder by not actual folder\n if key.startswith(prefix) and prefix + \"/\" != key:\n self.delete(key)", "def clear(self) -> None:\n # Delete these so the .by_class/name values are cleared.\n self['classname'] = 'info_null'\n del self['targetname']\n self._keys.clear()\n # Clear $fixup as well.\n self._fixup = None", "def clear_headers(self):\r\n\r\n # Remove things from the old dict as well\r\n self.reply_headers.clear()\r\n\r\n self.__reply_header_list[:] = []", "def clear(self):\n self._pkcache = {}\n self._typecache = defaultdict(dict)\n self.init()", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "def clear_address(self): #DONE\n for component_name in self.__keys:\n self.address[component_name] = Component(component_name, '')", "def remove_prefix(self, state_dict, prefix):\n return {\n (lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x)(\n key\n ): value\n for key, value in state_dict.items()\n }", "def test_ipam_prefixes_delete(self):\n pass", "def clear(self):\n self.root = _NGramMapNode()\n self.size_freqs = dict()\n self.ele_freqs = dict()", "def clear_keymap(self):\n self.keymap = {}", "def clear(self):\n dict.clear(self)\n self._sequence = []", "def clear(self):\n self._map = {}", "def empty_prefix(self):\r\n raise NotImplementedError()", "def clear():\r\n CURRENT_REQUEST_CONFIGURATION.data = {}", "def reset(self):\n self._keyCode = \"\"\n self._keyCodeCount = 0\n self._keyCodeTime = 0.0", "def empty_prefix():\n if not hasattr(CompletionElement, \"static_empty_prefix\"):\n res = CompletionElement(\"\", None)\n res.mks0 = res.mks1 = res.mks2 = 0\n res.mks0_ = res.mks1_ = res.mks2_ = 0\n CompletionElement.static_empty_prefix = res\n return res\n else:\n return CompletionElement.static_empty_prefix", "def reset(self):\n self._maps = {}", "def reset(self):\n self.cardinality = 0\n self.sax_character = 0\n self.wildcardbits = 0", "def del_prefix(self, index):\n del self.bytes[:index]", "def clear(self):\n for key in self.keys():\n del self[key]", "def invalidate_key_group(self, prefix):\r\n self.add(prefix, 0)\r\n self.incr(prefix)", "def clear(self):\n\n self.size = 0\n\n self.table = [[]] * 100\n\n self.keys_set = set()\n\n self.keys_ref = [[]] * 100", "def reset(self):\n self.det_link_map = OrderedDict()\n self.id_link_map = OrderedDict()\n self.declarations_table = None\n self.annotations_table = None\n self.num_frames = 0\n self.num_frames_by_uid = {}\n self.num_frames_by_uid_pre_remove = {}", "def clear(self) :\n self.__dict__ = {}", "def clear_nastran(self):\n self.eid_map = {}\n self.nid_map = {}\n self.eid_to_nid_map = {}\n self.element_ids = None\n self.node_ids = None", "def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0", "def clearMap(self):\n for key in self.componentMap.keys():\n del self.componentMap[key][:]", "def 
clear_grouping_keys(self):\n self.obj_payload[\"keys\"] = []", "def clear(self) -> None:\n self.loggers.clear()\n self.topics.clear()\n self.bindings = BindFlag(0)", "def reset(self):\n\t\tself.keywords = []\n\t\tself.locations = []\n\t\tself.usernames = []\n\n\t\tself._reset_buffer()", "def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None", "def clear(self):\n self.knownStrings.clear()", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def reset(self):\n self.registry = {}", "def _reset(self):\r\n self.pop(\"signature\", False)\r\n self.pop(\"signatures\", False)\r\n self.pop(\"signSignature\", False)\r\n self.pop(\"secondSignature\", False)\r\n self.pop(\"id\", False)", "def clear(self):\n self._store = {}", "def removeAllKeys(self) -> None:\n ...", "def unlisten(self, prefix: str) -> None:\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)", "def reset(cls):\r\n cls._ROOTS_BY_TYPE = {}\r\n cls._TYPES_BY_ROOT = {}\r\n cls._SEARCHED = set()", "def clear(self):\n self.__dict__.clear()", "def reset_type(self, name):\r\n del self._retype_dictionary[name]", "def clear(self):\n self.counts = [{} for _ in range(len(self.counts))]", "def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None\n self.prev_attn = None", "def _remove_prefix(self, path, prefix):\n expression = f\"_remove_prefix({repr(path)}, {repr(prefix)})\"\n return eval(expression, self.bzl_globals)", "def clear(self):\n for key in self.__data.keys():\n del self.__data[key]", "def clear(self):\n #for counterName in self.counters:\n # del self.counters[counterName]\n self.counters={}\n self.title=None", "def _clear(self):\n self._commands = []\n self._activeMacros = []\n self._index = 0\n self._emitSignals()\n self._inUndoRedo = False", "def reset(self):\n self.message = None\n self.status = 'msg msg-error'\n self.type_label = {}\n self.len_of_label = 0\n self.page = { \n \"name\": \"\",\n \"from\": \"\",\n \"import\": \"\",\n \"url\": {},\n \"title\": {},\n \"description\": {},\n \"content\": {},\n \"file\": \"\",\n \"label\": []\n }\n for code in self.languages:\n self.page['url'][code] = ''\n self.page['title'][code] = ''\n self.page['description'][code] = ''\n self.page['content'][code] = []", "def clear_messages(self):\n self.redis_client.delete(self.message_list)", "def reset(self):\n self.__handlers = []", "def clear(self):\n self.__hasTABLE = False\n self.__hasGRAPHS = False\n self.__ndoubledollar = 0\n buffer.clear(self)", "def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None", "def reset(self):\n self._topics.clear()", "def reset(self):\n\n self.type = None\n self.additional_context = \"\"\n super().reset()", "def clear(self):\n self.__hooks = odict()", "def clear(self) -> None:\n self.data = {} # defaultdict fails (T282865)\n self.size = 0", "def _ensure_prefix_is_set(chunk_info, telstate):\n for info in chunk_info.values():\n if 'prefix' not in info:\n info['prefix'] = telstate['chunk_name']\n return chunk_info", "def clear(self):\n LongObjectHashMap.self.clear()", "def clear(self):\n LongObjectHashMap.self.clear()", "async def _clear(self, namespace=None):\n if namespace:\n for key in list(SimpleMemoryBackend._cache):\n if key.startswith(namespace):\n self.__delete(key)\n else:\n SimpleMemoryBackend._cache = {}\n SimpleMemoryBackend._handlers = {}\n return True", "def reset(self):\r\n self.look_up_table = 
list(map(convert_to_list, self.const_look_up_table))", "def full_clear(self):\n self.clear()\n self.class_hooks.clear()", "def delete_keys_with_prefix(prefix):\n rc = redis.StrictRedis(host=REDIS_SINGLE_HOST, port=REDIS_PORT, db=0)\n keys = rc.keys(\"*\" + prefix + \"*\")\n for key in keys:\n rc.delete(key)", "def resetPrefix(self):\n pp = self.rendererWindow.getCurrentPipelinePage()\n\n if pp is None:\n filename = \"\"\n\n else:\n filename = pp.filename\n\n guess = self.guessFilePrefix(filename)\n\n self.fileprefix.setText(guess)", "def clear(self): # real signature unknown; restored from __doc__\n pass", "def reset(self):\n self.getsCounter = 0\n\n # dictionary of processed requests for each client. Value for each\n # client is a dictionary with request id as key and transaction id as\n # value\n self.processedRequests = {} # type: Dict[str, Dict[int, str]]\n\n # dictionary of responses to be sent for each client. Value for each\n # client is an asyncio Queue\n self.responses = {} # type: Dict[str, asyncio.Queue]\n\n # dictionary with key as transaction id and `Reply` as\n # value\n self.transactions = {} # type: Dict[str, Reply]", "def removeAll(self):\n self.pDict.clear()", "def reset(self):\n self.keyToFile=dict()", "def _getPrefixDict(self):\r\n if not hasattr(self, '_prefixDict'):\r\n self.__prefixDict = {}\r\n return self.__prefixDict", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")", "def clear(self):\r\n for key in self.conn.keys():\r\n self.conn.delete(key)", "def clear(self):\n self._fingerprint = 0", "def reset(self):\n if hasattr(self, \"W\"):\n del self.W\n if hasattr(self, \"T\"):\n del self.T\n if hasattr(self, \"P\"):\n del self.P", "def clear(self):\n self.globalDefines = {}\n self.axiom = self.setAxiomFromString(\"\")\n self.clearProductions()\n self.niterations = 1\n self.resultPString = None", "def clear(self):\n #self._global_filters = []\n #self._global_filters_imported = []\n for method in self.__slots__:\n if method[0] != '_':\n self[method] = OrderedDict()", "def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n self.dirty = True", "def clear():\n global d\n for key in d.keys():\n del d[key]", "def removeprefix(self, x) -> String:\n pass", "def clear(self):\n self.puml_tables = OrderedDict()\n self.current_table = None", "def clear(self):\n return _libsbml.XMLNamespaces_clear(self)", "def reset(self):\n self._unset_defaults_and_overrides()\n self.clear()", "def reset(self):\n self.in_compact_method = False\n self.in_setup = False\n self.autoname_cursor = dict()", "def reset():\n for i in flags.keys(): flags[i] = 0\n for i in meta.keys(): meta[i] = \"\"\n return (None, \"CON\")", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def Clear(self): # real signature unknown; restored from __doc__\n pass", "def remove_prefix(z, prefix):\n if 
z.startswith(prefix):\n return re.sub(r\"^{}\".format(prefix), \"\", z)\n else:\n return z", "def clear(self):\n self.molo_tcp_pack.clear()\n self.append_recv_buffer = bytes()\n self.append_send_buffer = bytes()\n self.append_connect = True\n self.client_status = None", "def reset(cls):\n cls.__register = {}", "def reset(self):\n self.memory.clear()\n self.relative_base = 0\n self.input_queue.clear()\n self.instr_idx = 0" ]
[ "0.661214", "0.6500577", "0.6333235", "0.60439557", "0.6028406", "0.60041296", "0.5998321", "0.59930265", "0.59643847", "0.59600914", "0.59294903", "0.5922057", "0.5883665", "0.5853302", "0.58453923", "0.5836675", "0.5826535", "0.5804847", "0.5762894", "0.5750483", "0.5747566", "0.57125735", "0.56730354", "0.5659652", "0.5644342", "0.5637284", "0.5634939", "0.56243587", "0.5615651", "0.5598903", "0.5595926", "0.55945015", "0.5581859", "0.5569211", "0.5565801", "0.55346054", "0.5525669", "0.5520524", "0.55196893", "0.5515582", "0.5502515", "0.55005777", "0.54807097", "0.5477163", "0.5476047", "0.547498", "0.5466724", "0.5465179", "0.54570156", "0.539439", "0.53869903", "0.53743184", "0.5373868", "0.53737944", "0.53680354", "0.5365311", "0.53621465", "0.53555286", "0.5352712", "0.5351595", "0.5337196", "0.5337196", "0.53347766", "0.5332029", "0.5325608", "0.53245306", "0.53144157", "0.53129023", "0.5302365", "0.530177", "0.5300067", "0.5299447", "0.52910864", "0.5286465", "0.52741873", "0.5268346", "0.5263681", "0.5260391", "0.5250455", "0.5250077", "0.5244855", "0.52443486", "0.523764", "0.52360827", "0.5226775", "0.5221983", "0.5206419", "0.52030843", "0.52030843", "0.52030843", "0.52030843", "0.52030843", "0.52030843", "0.52030843", "0.52030843", "0.52030843", "0.5201983", "0.52001446", "0.5197087", "0.51844376" ]
0.8281239
0
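The metadata's objective block marks each record for triplet-style training over (query, document, negatives). A minimal sketch of expanding one record into training triplets under that reading (row is assumed to be a parsed record as in the loading sketch above):

def make_triplets(row):
    # One (anchor, positive, negative) triplet per negative snippet.
    return [
        (row["query"], row["document"], negative)
        for negative in row["negatives"]
    ]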
Returns a typecode instance representing the passed in element.
    element -- XMLSchema.ElementDeclaration instance
    literal -- literal encoding?
    local -- is locally defined?
    namespaceURI -- namespace
def _getElement(self, element, literal=False, local=False, namespaceURI=None):
    if not element.isElement():
        raise TypeError, 'Expecting an ElementDeclaration'
    tc = None
    elementName = element.getAttribute('name')
    tp = element.getTypeDefinition('type')
    typeObj = None
    if not (tp or element.content):
        nsuriType, localName = element.getAttribute('type')
        typeClass = self._getTypeClass(nsuriType, localName)
        typeObj = typeClass(elementName)
    elif not tp:
        tp = element.content
    if not typeObj:
        typeObj = self._getType(tp, elementName, literal, local, namespaceURI)
    minOccurs = int(element.getAttribute('minOccurs'))
    typeObj.optional = not minOccurs
    maxOccurs = element.getAttribute('maxOccurs')
    typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)
    return typeObj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def element_type(self) -> global___Type:", "def create_class_instance(element, element_id, doc_id):\n xsi_type = get_xsi_type(element)\n element_class = XSI_TYPE_CLASSES[xsi_type]\n return element_class.from_etree(element)", "def element_type(self):\r\n result = conf.lib.clang_getElementType(self)\r\n if result.kind == TypeKind.INVALID:\r\n raise Exception('Element type not available on this type.')\r\n\r\n return result", "def _get_element_type(self, element):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n el_type = None\n for value in self._client.wsdl.schema.types.values():\n if (value.name == element):\n if ('Simple' in value.id):\n el_type = 'Simple'\n elif ('Complex' in value.id):\n el_type = 'Complex'\n break\n\n return el_type", "def elementDecl(self, name, type, content):\n pass", "def convert_to_semanticsymbol(cls, elem):\r\n if (len(elem) == 0):\r\n return None\r\n\r\n elem_content = io.StringIO(elem) # treat the string as if a file\r\n root = xml.etree.ElementTree.parse(elem_content).getroot()\r\n\r\n return SemanticSymbol.parse_from_mathml(root)", "def convert_to_layoutsymbol(cls, elem):\r\n if (len(elem) == 0):\r\n return None\r\n\r\n elem_content = io.StringIO(elem) # treat the string as if a file\r\n root = xml.etree.ElementTree.parse(elem_content).getroot()\r\n ## print(\"parse_from_mathml tree: \" + xml.etree.ElementTree.tostring(root,encoding=\"unicode\"))\r\n return LayoutSymbol.parse_from_mathml(root)", "def parseTerm(element):\n tag, text = element.tag, element.text\n if tag == RESULTS_NS_ET + 'literal':\n if text is None:\n text = ''\n datatype = None\n lang = None\n if element.get('datatype', None):\n datatype = URIRef(element.get('datatype'))\n elif element.get(\"{%s}lang\" % XML_NAMESPACE, None):\n lang = element.get(\"{%s}lang\" % XML_NAMESPACE)\n\n ret = Literal(text, datatype=datatype, lang=lang)\n\n return ret\n elif tag == RESULTS_NS_ET + 'uri':\n return URIRef(text)\n elif tag == RESULTS_NS_ET + 'bnode':\n return BNode(text)\n else:\n raise TypeError(\"unknown binding type %r\" % element)", "def __call__(self, node):\n # we assume we know what this type is and raise and catch the key error\n # exception if we don't\n try:\n s = self.lut[node.tagName](node, self)\n except KeyError, e:\n raise DeclareError(e[0], node)\n\n # save this, for use with typedef's later\n self.symbols[s.getType()+s.getName()] = s\n\n return s", "def schema_elem(self) -> ElementType:\n return self.elem", "def createNodeElement(_session, _segment, _const):\n return createNode(_session, _segment, _const, \"element\")", "def FromXML(cls, doc, element, default=\"absolute\"):\n return cls(element.get(\"type\", default), NumberDef(element.text))", "def get_element_type(cls):\r\n return cls._type_name(cls.element_type)", "def _project_elem(self, elem, mapping):\r\n\t\tif isinstance(elem, basestring):\r\n\t\t\treturn elem\r\n\t\telif isinstance(elem, xmlmodel.XmlElem):\r\n\t\t\tcls = mapping.get_class_for(elem)\r\n\t\t\tif cls is None:\r\n\t\t\t\traise TypeError, 'Could not determine object class for \\'{0}\\' element for node type {1}'.format(elem.tag, type(self))\r\n\t\t\tif not isinstance(cls, NodeClass):\r\n\t\t\t\tif callable(cls):\r\n\t\t\t\t\tcls = cls()\r\n\t\t\t\telse:\r\n\t\t\t\t\traise TypeError, 'Object class for \\'{0}\\' element for node type {1} is of type {2}, should be a NodeClass or a callable'.format(elem.tag, type(self), type(cls))\r\n\t\t\tnode = self._projection_table.get(elem, cls)\r\n\t\t\tif node is None:\r\n\t\t\t\tnode 
= cls(self._projection_table, elem)\r\n\t\t\t\tself._projection_table.put(elem, cls, node)\r\n\t\t\t\tnode.node_init()\r\n\t\t\treturn node\r\n\t\telse:\r\n\t\t\traise TypeError, 'elem should be a string or an XmlElem'", "def _getTypeClass(self, namespaceURI, localName):\r\n bti = BaseTypeInterpreter()\r\n simpleTypeClass = bti.get_typeclass(localName, namespaceURI)\r\n return simpleTypeClass", "def make_key(element_name, element_type, namespace):\n # only distinguish 'element' vs other types\n if element_type in ('complexType', 'simpleType'):\n eltype = 'complexType'\n else:\n eltype = element_type\n if eltype not in ('element', 'complexType', 'simpleType'):\n raise RuntimeError(\"Unknown element type %s = %s\" % (element_name, eltype))\n return (element_name, eltype, namespace)", "def getElementSymbol(self):\n dataDict = self.__dict__\n result = None\n return result", "def visit_Declaration(self, node):\n name = self.name_gen.next()\n extend_ops = self.extend_ops\n self.push_name(name)\n base_code = compile(node.base.py_ast, self.filename, mode='eval')\n extend_ops([\n # f_globals = globals()\n (LOAD_GLOBAL, 'globals'),\n (CALL_FUNCTION, 0x0000),\n (STORE_FAST, 'f_globals'),\n\n # eval_ = eval\n (LOAD_GLOBAL, 'eval'),\n (STORE_FAST, 'eval_'),\n\n # foo_cls = eval('Window', toolkit, f_globals)\n # foo = foo_cls.__enaml_call__(identifiers, toolkit)\n (LOAD_FAST, 'eval_'),\n (LOAD_CONST, base_code),\n (LOAD_FAST, 'toolkit'),\n (LOAD_FAST, 'f_globals'),\n (CALL_FUNCTION, 0x0003),\n (LOAD_ATTR, '__enaml_call__'),\n (LOAD_FAST, 'identifiers'),\n (LOAD_FAST, 'toolkit'),\n (CALL_FUNCTION, 0x0002),\n (STORE_FAST, name),\n ])\n\n if node.identifier:\n extend_ops([\n # identifiers['foo'] = foo\n (LOAD_FAST, name),\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n \n visit = self.visit\n for item in node.body:\n visit(item)\n \n extend_ops([\n # return foo\n (LOAD_FAST, name),\n (RETURN_VALUE, None),\n ])\n\n self.pop_name()", "def newElement(self,cls,attrib={}):\n elem = cls(**attrib)\n self.setFreeId(elem)\n if cls==Subtoken:\n self.subtokens[elem.id] = elem\n elif cls==DepToken:\n self.deptokens[elem.id] = elem\n elif cls==RelToken:\n self.reltokens[elem.id] = elem\n elif cls==DepEntity:\n self.depentities[elem.id] = elem\n elif cls==RelEntity:\n self.relentities[elem.id] = elem\n else:\n # It is caller responsibility to add elements to the graph\n pass\n \n return(elem)", "def from_element(cls, elem):\n return cls(elem.attrib['pid'], elem.attrib['name'], elem.text, elem.attrib['tags'])", "def CreateFromDOM (node, default_namespace=None):\r\n if default_namespace is None:\r\n default_namespace = Namespace.fallbackNamespace()\r\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def _globalElement(self, typeCode, namespaceURI, literal):\r\n if literal:\r\n typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s=\"%(namespaceURI)s\"' \\\r\n %{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI}", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if 
default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)", "def shape_element(element):\n node = {}\n if element.tag == \"node\" or element.tag == \"way\" :\n # Geo Data\n pos = [0,0]\n has_pos = False\n created = {}\n node['type'] = element.tag\n for key,value in element.attrib.iteritems():\n if key in CREATED:\n created[key] = value\n elif key in ['lat','lon']:\n has_pos = True\n if key == 'lat':\n pos[0] = float(value)\n else:\n pos[1] = float(value)\n else:\n node[key] = value\n address = {}\n has_address = False\n for tag in element.iter('tag'):\n if problemchars.search(tag.get('k')) is not None:\n continue\n elif 'addr:' in tag.get('k'):\n has_address = True\n addr_list = tag.get('k').split(\":\")\n address[\"\".join(addr_list[1:])] = tag.get('v')\n # clean the phone data\n elif 'phone' in tag.get('k'):\n \tnode[tag.get('k')] = clean_phone(tag.get('v'))\n # convert the facility type\n elif 'social_facility' in tag.get('k'):\n node[tag.get('k')] = \"应急避难场所\"\n # clean the capacity data\n elif 'capacity' in tag.get('k'):\n node[tag.get('k')] = clean_capacity(tag.get('v'))\n else:\n node[tag.get('k')] = tag.get('v')\n node_refs = []\n has_node_refs = False\n for tag in element.iter('nd'):\n has_node_refs = True\n node_refs.append(tag.get('ref'))\n if has_node_refs:\n node['node_refs'] = node_refs\n node['created'] = created\n if has_pos:\n node['pos'] = pos\n if has_address:\n node['address'] = address\n return node\n else:\n return None", "def makeelement(self, _tag, attrib=None, nsmap=None, **_extra): # real signature unknown; restored from __doc__\n pass", "def define(self, name, constructor, options=None):\n if '-' not in name:\n raise ValueError('Invalid custom element name. 
Must contain hypen: ' + name)\n # el = document.createElement(name)\n # el.constructor = constructor\n from domonic.html import tag\n from domonic.dom import Element\n el = type(name, (tag, Element), {'name': name, '__init__': constructor})\n if options is not None:\n if 'extends' in options:\n el.extends = options['extends']\n self.store[name] = el\n return el", "def __init__(self, node, declare):\n symbol.__init__(self, node, declare, \"typedef\")\n # say we don't alias a structure\n self.sym = None\n # now work out what the define is\n define = getOptionalNode(node, \"alias\")\n syms = getOptionalNode(node, \"members\")\n if define is None and syms is None:\n raise TagError([\"alias\", \"members\"], node)\n if syms is None:\n # this is just a value\n self.define = self.value(define.childNodes[0].data)\n else:\n s = filter(lambda n: n.nodeType == n.ELEMENT_NODE, syms.childNodes)\n if len(s) != 1:\n raise Exception(\"Multiple symbols in typedef declaration: %s\" % self.name)\n # this is the structure\n self.define = declare(s[0])", "def declaration(self) -> global___Statement.Declaration:", "def byte_sequence(self) -> global___Statement.Declaration:", "def f1(elem):\n builder = Builder()\n parse_object(elem,builder)\n print(builder.to_encoding())", "def _function_element_class(self):\n return FriCASFunctionElement", "def FromXML(cls, doc, element):\n return cls(element.text)", "def FromXML(cls, doc, element):\n return cls(element.text)", "def FromXML(cls, doc, element):\n return cls(element.text)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, _fallback_namespace=default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, _fallback_namespace=default_namespace)", "def CreateFromDOM (node, default_namespace=None):\n if default_namespace is None:\n default_namespace = Namespace.fallbackNamespace()\n return pyxb.binding.basis.element.AnyCreateFromDOM(node, _fallback_namespace=default_namespace)", "def FromXML(cls, doc, element):\n return cls(element.get(\"type\", \"absolute\"), NumberDef(element.text))", "def _create_element(tag, text=\"\", attr={}, namespace=Xmlns_path):\n element = Et.Element('.//' + namespace + tag, attr)\n element.text = text\n return element", "def ElementType(self) -> _n_0_t_1:", "def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\n problem_chars=problemchars, default_tag_type='regular'):\n\n node_attribs = {}\n way_attribs = {}\n way_nodes = []\n tags = [] # Handle secondary tags the same way for both node and way elements\n\n if element.tag == 'node':\n for i in NODE_FIELDS:\n node_attribs[i] = element.attrib[i]\n for tag in element.iter(\"tag\"):\n tag_dict= {}\n # Applying correction for post code in node element\n if is_postal_name(tag):\n # postcode \"110089\" has been incorrectly entered as\"10089\" \n if tag.attrib['v'] == \"10089\":\n # Correcting postcode to its right value\n tag.attrib[\"v\"] = \"110089\"\n # postcodes starting from \"2\" are wrong so omit the postcode tag \n elif tag.attrib['v'][0] == \"2\":\n continue\n # Calling the function to clean the language code problems\n tag.attrib['k'] = update_name(tag.attrib['k'], mapping)\n tag_dict['id'] = node_attribs['id']\n key = tag.attrib['k']\n if re.search(problemchars, 
tag.attrib['k']):\n pass\n if re.search(lower_colon, tag.attrib['k']):\n pass\n if ':' in tag.attrib['k']:\n type = key[: key.index(':')]\n key = key[key.index(':')+1 :] \n else:\n type = 'regular' \n tag_dict['key'] = key\n tag_dict['value'] = tag.attrib['v']\n tag_dict['type'] = type\n tags.append(tag_dict)\n return {'node': node_attribs, 'node_tags': tags}\n elif element.tag == 'way':\n for way in WAY_FIELDS:\n way_attribs[way] = element.attrib[way]\n for tag in element.iter(\"tag\"):\n tag_dict1= {}\n # Applying correction for post code in node element\n if is_postal_name(tag):\n # postcode \"110089\" has been incorrectly entered as\"10089\" \n if tag.attrib['v'] == \"10089\":\n # Correcting postcode to its right value\n tag.attrib[\"v\"] = \"110089\"\n # postcodes starting from \"2\" are wrong so omit the postcode tag \n elif tag.attrib['v'][0] == \"2\":\n continue\n tag_dict1['id'] = way_attribs['id']\n key = tag.attrib['k']\n if re.search(PROBLEMCHARS, tag.attrib['k']):\n pass\n if re.search(PROBLEMCHARS, tag.attrib['k']):\n pass\n if ':' in tag.attrib['k']:\n type = key[: key.index(':')]\n key = key[key.index(':')+1 :]\n else:\n type = 'regular' \n tag_dict1['key'] = key\n tag_dict1['value'] = tag.attrib['v']\n tag_dict1['type'] = type\n tags.append(tag_dict1) \n i= 0\n for tag in element.iter(\"nd\"):\n way_dict = {}\n way_dict[\"id\"] = way_attribs[\"id\"]\n way_dict[\"node_id\"] = tag.attrib[\"ref\"]\n way_dict[\"position\"] = i\n way_nodes.append(way_dict)\n i +=1 \n return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}", "def get_element(self,elementname):\n if elementname == 'shape' and self.t == None:\n raise ValueError('When shape is used, the t value has to be set.')\n elif elementname != 'shape' and self.t != None:\n raise ValueError('When shape is not used, the t value should not be set.')\n\n element = ET.Element(elementname,attrib=self.get_attributes())\n \n return element", "def _get_element_ns(self, element):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n ns = None\n for key in self._client.wsdl.schema.types.keys():\n if (key[0] == element):\n ns = key[1]\n break\n\n return ns", "def __init__(self, node):\n\t\t\tself.name = node.get('name')\n\t\t\tif not re.match(\"^[0-9A-Z_]*$\", self.name):\n\t\t\t\traise ParserException(\"Attribute name of element in enum has to be UPPER_UNDERSCORE_STYLE (found: '%s')\" % (self.name))\n\t\t\t\n\t\t\tself.string = node.get('string')\n\t\t\tif self.string is None:\n\t\t\t\tself.string = self.name\n\t\t\t\t\n\t\t\tself.description = xml_utils.get_description(node)\n\t\t\tself.string = xml_utils.get_string(node)\n\t\t\t\n\t\t\tvalue = node.get('value')\n\t\t\tself.value = None if (value is None) else int(value, 0)", "def FromXML(cls, doc, element):\n for subelem in list(element):\n tag = realtag(subelem)\n if tag == \"direction\":\n direction = Direction.FromXML(doc, subelem)\n elif tag == \"term\":\n term = INumberDef(subelem.text)\n try:\n return cls(term, direction)\n except UnboundLocalError as exc:\n raise ParseError(str(exc))", "def getInterfaceElement(publication):\n\treturn _getInterfaceMaker(publication.render)(publication)", "def getTypeCode(self):\n return _libsbml.ReplacedElement_getTypeCode(self)", "def _createVetor(cls, elem):\n return cls(elem)", "def _generate_type(self, n, modifiers=[], emit_declname = True):\n\t\ttyp = type(n)\n\n\t\t#~ print(n, modifiers)\n\n\t\tif typ == pycparser.c_ast.TypeDecl:\n\t\t\ts = ''\n\t\t\tif n.quals: s += ' '.join(n.quals) + ' 
'\n\t\t\ts += self.visit(n.type)\n\n\t\t\t# Local variables & parameter renaming.\n\t\t\t#\n\t\t\t# Variable name substitution only applies to local variables or parameters names within function prototypes\n\t\t\t# (thus, global variables and function names need to be excluded)\n\t\t\t#\n\t\t\t# case 1: level-0 function parameters (no remanimg for nested parameters)\n\t\t\t# case 2: local variable declaration (thus excluding functions, global vars, struct-enum-union fields, nested parameters)\n\t\t\t#\n\t\t\tif self.__visitingParam == 1: # case 1\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case I)\") % (self.__currentFunction,n.declname)\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.paramprefix + self.__currentFunction + '_'+self.inlineInfix #S:\n\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\telse: \n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)]\n\t\t\t\tn.declname = (self.paramprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname) if n.declname else '' #S:\n\t\t\t\n\t\t\telif (self.__visitingParam == 0 and # case 2\n\t\t\t\t\tself.__visitFuncDef == 0 and\n\t\t\t\t\tn.declname not in self.Parser.funcName and\n\t\t\t\t\t#n.declname not in self.Parser.varNames[''] and\n\t\t\t\t\tself.__currentFunction != '' and\n\t\t\t\t\tself.__visitStructUnionEnum == 0):\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case II)\") % (self.__currentFunction,n.declname)\n\t\t\t\t#S: env.local, the followin two lines are replaced with the following if\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_'\n\t\t\t\t#n.declname = self.prefix + self.__currentFunction + '_' + n.declname if n.declname else ''\n\t\t\t\tif self.__init: \n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse: \n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.prefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\telse:\n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.nondetprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t#print n.declname\n\t\t\t\t#print self.newIDs\n\t\n\n\t\t\tnstr = n.declname if n.declname else ''\n\n\t\t\t# Resolve modifiers.\n\t\t\t# Wrap in parens to 
distinguish pointer to array and pointer to\n\t\t\t# function syntax.\n\t\t\t#\n\t\t\tfor i, modifier in enumerate(modifiers):\n\t\t\t\tif isinstance(modifier, pycparser.c_ast.ArrayDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '[' + self.visit(modifier.dim) + ']'\n\t\t\t\telif isinstance(modifier, pycparser.c_ast.FuncDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '(' + self.visit(modifier.args) + ')'\n\t\t\t\telif isinstance(modifier, pycparser.c_ast.PtrDecl):\n\t\t\t\t\tif modifier.quals:\n\t\t\t\t\t\tnstr = '* %s %s' % (' '.join(modifier.quals), nstr)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnstr = '*' + nstr\n\t\t\tif nstr: s += ' ' + nstr\n\t\t\treturn s\n\t\telif typ == pycparser.c_ast.Decl:\n\t\t\treturn self._generate_decl(n.type)\n\t\telif typ == pycparser.c_ast.Typename:\n\t\t\treturn self._generate_type(n.type)\n\t\telif typ == pycparser.c_ast.IdentifierType:\n\t\t\treturn ' '.join(n.names) + ' '\n\t\telif typ in (pycparser.c_ast.ArrayDecl, pycparser.c_ast.PtrDecl, pycparser.c_ast.FuncDecl):\n\t\t\treturn self._generate_type(n.type, modifiers + [n])\n\t\telse:\n\t\t\treturn self.visit(n)\n\n\n\n\t\tdef visit_Compound(self, n):\n\t\t\tself.__visitingCompound += 1\n\t\t\ts = super(self.__class__, self).visit_Compound(n)\n\t\t\tfor key in self.newIDs: #S: remove pairs that have been added in this compound\n\t\t\t\tstack = self.newIDs[key] \n\t\t\t\tif stack and stack[-1][1] == self.__visitingCompound: \n\t\t\t\t\tstack.pop()\n\t\t\tself.__visitingCompound -= 1\n\t\t\treturn s", "def _an_element_(self):\n from sage.rings.integer_ring import ZZ\n return self(self.realization_of().PD().get_point(ZZ.zero()))", "def _element_constructor_(self, x):\n R = self.base_ring()\n\n #Coerce ints to Integers\n if isinstance(x, int):\n x = Integer(x)\n if x in R:\n if x == 0:\n return self.zero()\n else:\n raise TypeError(\"do not know how to make x (= %s) an element of %s\"%(x, self))\n #x is an element of the basis enumerated set;\n elif x in self._indices:\n return self.monomial(self._indices(x))\n raise TypeError(\"do not know how to make x (= %s) an element of self (=%s)\"%(x,self))", "def entity(self, elem):\n return data.Entity(self, elem)", "def _convert(element):\n value = element.text\n if value is not None:\n value = TAG_TYPES.get(element.tag, unicode)(value)\n return (element.tag, value)", "def doc_from_xml(document_element_name, inner_xml=None):\n # Note: Namespaces don't work consistency in Python 2 and 3.\n \"\"\"\n nsmap = {\n None: \"http://www.w3.org/2001/XMLSchema-instance\",\n \"i\": \"http://www.w3.org/2001/XMLSchema-instance\"\n }\n\n xml.attrib[\"xmlns:i\"] = \"http://www.w3.org/2001/XMLSchema-instance\"\n xml.attrib[\"xmlns\"] = \"http://schemas.microsoft.com/windowsazure\"\n \"\"\"\n xml = ET.Element(document_element_name)\n xml.set(\"xmlns\", \"http://schemas.microsoft.com/windowsazure\")\n\n if inner_xml is not None:\n xml.append(inner_xml)\n\n return xml", "def getEnumerationTypeXmlStub (typeName): \n\tsimpleType = createSchemaElement(\"simpleType\")\n\tsimpleType.setAttribute (\"name\",typeName)\n\trestriction = createSchemaElement(\"restriction\")\n\trestriction.setAttribute (\"base\", qp(\"token\"))\n\tsimpleType.appendChild (restriction)\n\treturn simpleType", "def new_element(self, short_name):\n return lxml.etree.Element(self._qualify(short_name))", "def _getType(self, tp, name, 
literal, local, namespaceURI):\r\n ofwhat = []\r\n if not (tp.isDefinition() and tp.isComplex()):\r\n raise EvaluateException, 'only supporting complexType definition'\r\n elif tp.content.isComplex():\r\n if hasattr(tp.content, 'derivation') and tp.content.derivation.isRestriction():\r\n derived = tp.content.derivation\r\n typeClass = self._getTypeClass(*derived.getAttribute('base'))\r\n if typeClass == TC.Array:\r\n attrs = derived.attr_content[0].attributes[WSDL.BASE]\r\n prefix, localName = SplitQName(attrs['arrayType'])\r\n nsuri = derived.attr_content[0].getXMLNS(prefix=prefix)\r\n localName = localName.split('[')[0]\r\n simpleTypeClass = self._getTypeClass(namespaceURI=nsuri, localName=localName)\r\n if simpleTypeClass:\r\n ofwhat = simpleTypeClass()\r\n else:\r\n tp = self._wsdl.types[nsuri].types[localName]\r\n ofwhat = self._getType(tp=tp, name=None, literal=literal, local=True, namespaceURI=nsuri)\r\n else:\r\n raise EvaluateException, 'only support soapenc:Array restrictions'\r\n return typeClass(atype=name, ofwhat=ofwhat, pname=name, childNames='item')\r\n else:\r\n raise EvaluateException, 'complexContent only supported for soapenc:Array derivations'\r\n elif tp.content.isModelGroup():\r\n modelGroup = tp.content\r\n for item in modelGroup.content:\r\n ofwhat.append(self._getElement(item, literal=literal, local=True))\r\n\r\n tc = TC.Struct(pyclass=None, ofwhat=ofwhat, pname=name)\r\n if not local:\r\n self._globalElement(tc, namespaceURI=namespaceURI, literal=literal)\r\n return tc\r\n\r\n raise EvaluateException, 'only supporting complexType w/ model group, or soapenc:Array restriction'", "def dump_element(element):\n return _schema_lookup[element.__class__]().dump(element)", "def _create_elements_py(filename=\"_elements.py\"):\n import re\n from pickle import dumps\n import urllib\n from os.path import exists, join\n from BeautifulSoup import BeautifulSoup, HTMLParseError\n from ..physics import a0\n import quantities as pq\n\n atom_list = [ # 'Silicon', 'Hydrogen', 'Gold' ] \n 'Ruthenium', 'Rhenium', 'Rutherfordium', 'Radium', 'Rubidium',\n 'Radon', 'Rhodium', 'Beryllium', 'Barium', 'Bohrium', 'Bismuth',\n 'Berkelium', 'Bromine', 'Hydrogen', 'Phosphorus', 'Osmium', 'Mercury',\n 'Germanium', 'Gadolinium', 'Gallium', 'Ununbium', 'Praseodymium',\n 'Platinum', 'Plutonium', 'Carbon', 'Lead', 'Protactinium', 'Palladium',\n 'Xenon', 'Polonium', 'Promethium', 'Hassium',\n 'Holmium', 'Hafnium', 'Molybdenum', 'Helium', 'Mendelevium', 'Magnesium',\n 'Potassium', 'Manganese', 'Oxygen', 'Meitnerium', 'Sulfur', 'Tungsten',\n 'Zinc', 'Europium', 'Einsteinium', 'Erbium', 'Nickel', 'Nobelium',\n 'Sodium', 'Niobium', 'Neodymium', 'Neon', 'Neptunium', 'Francium', 'Iron',\n 'Fermium', 'Boron', 'Fluorine', 'Strontium', 'Nitrogen', 'Krypton',\n 'Silicon', 'Tin', 'Samarium', 'Vanadium', 'Scandium', 'Antimony',\n 'Seaborgium', 'Selenium', 'Cobalt', 'Curium', 'Chlorine', 'Calcium',\n 'Californium', 'Cerium', 'Cadmium', 'Thulium', 'Caesium', 'Chromium',\n 'Copper', 'Lanthanum', 'Lithium', 'Thallium', 'Lutetium', 'Lawrencium',\n 'Thorium', 'Titanium', 'Tellurium', 'Terbium', 'Technetium', 'Tantalum',\n 'Ytterbium', 'Dubnium', 'Zirconium', 'Dysprosium', 'Iodine', 'Uranium',\n 'Yttrium', 'Actinium', 'Silver', 'Iridium', 'Americium', 'Aluminium',\n 'Arsenic', 'Argon', 'Gold', 'Astatine', 'Indium']\n\n orbital_radii = _orbital_radii()\n pettifor_numbers = _pettifor_numbers()\n\n re_swf = re.compile(\"(rainbow|NI3|volcano|\\_flash|K\\_H2O).swf\\s*(?!\\\")\")\n re_atomweight = 
re.compile(\":\\s*\\[?\\s*(\\d+(?:\\.\\d+)?)\\s*\\]?\")\n results = {}\n for name in atom_list: \n\n # first opens and reads file.\n if not exists(join(\"elements\", name)): \n file = urllib.urlopen(\"http://www.webelements.com/{0}\".format(name.lower()))\n string = file.read()\n file.close()\n else:\n with open(join(\"elements\", name), \"r\") as file: string = file.read()\n string = string.replace(\"alt\\\"\", \"alt=\\\"\")\n soup = BeautifulSoup(re.sub(re_swf,\"rainbow.swf\\\"\",string))\n\n atom = Element(name=name)\n atom.symbol = soup.findChild( name=\"a\", attrs={\"title\": \"Element names and symbols\"},\\\n text=\" Symbol\").parent.parent.contents[1].split()[1]\n atom.atomic_number = soup.findChild(name=\"a\", attrs={\"title\": \"Element atomic numbers\"})\\\n .parent.contents[-1].split()[1]\n atom.atomic_number = int(atom.atomic_number)\n atom.atomic_weight = soup.findChild(name=\"a\", attrs={\"title\": \"Element atomic weights\"})\\\n .parent.prettify()\n found = re_atomweight.search(atom.atomic_weight)\n if found is None: print name\n else: atom.atomic_weight = float(found.group(1))\n\n \n # ionization stuff\n if not exists(join(\"elements\", name + \"_atoms.html\")):\n file = urllib.urlopen(\"http://www.webelements.com/{0}/atoms.html\".format(name.lower()))\n string = file.read()\n file.close()\n else: \n with open(join(\"elements\", name + \"_atoms.html\"), \"r\") as file: string = file.read()\n soup = BeautifulSoup(string) \n # electron affinity\n found = re.search(\"of\\s+{0}\\s+is\\s+(\\S+)\".format(name.lower()), string)\n if found.group(1) == \"no\": atom.electron_affinity = None\n else: atom.electron_affinity = float(found.group(1)) * pq.kilo * pq.J / pq.mol\n # ionization energies\n energies = []\n for child in soup.findChild(name=\"table\", attrs={\"class\":\"chemistry-data\"})\\\n .findChildren(name='td'):\n energies.append(float(child.string) * pq.kilo * pq.J / pq.mol)\n atom.ionization_energies = energies if len(energies) > 0 else None\n\n\n # electronegativities.\n if not exists(join(\"elements\", name + \"_electronegativity.html\")):\n file = urllib.urlopen(\"http://www.webelements.com/{0}/electronegativity.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n else: \n with open(join(\"elements\", name + \"_electronegativity.html\"), \"r\") as file:\n string = file.read()\n soup = BeautifulSoup(string) \n attrs = { \"href\": \"../periodicity/electronegativity_pauling/\",\\\n \"title\": \"View definition and pictures showing periodicity \"\\\n \"of Pauling electronegativity\"}\n pauling = soup.findChild(name=\"a\", attrs=attrs).parent.parent.contents[-1].string\n pauling = pauling.split()[0]\n atom.pauling = float(pauling) if pauling != \"no\" else None\n\n attrs = { \"href\": \"../periodicity/electronegativity_sanderson/\" }\n sanderson = soup.findChild(name=\"a\", attrs=attrs).parent.parent.contents[-1].string\n sanderson = sanderson.split()[0]\n atom.sanderson = float(sanderson) if sanderson != \"no\" else None\n\n attrs = { \"href\": \"../periodicity/electroneg_allred_rochow/\" }\n allred_rochow = soup.findChild(name=\"a\", attrs=attrs).parent.parent.contents[-1].string\n allred_rochow = allred_rochow.split()[0]\n atom.allred_rochow = float(allred_rochow) if allred_rochow != \"no\" else None\n\n attrs = { \"href\": \"../periodicity/electroneg_mulliken_jaffe/\" }\n mulliken_jaffe = soup.findChild(name=\"a\", attrs=attrs).parent.parent.contents[-1]\n if name in [\"Germanium\", \"Gallium\", \"Carbon\", \"Lead\", \"Boron\", \"Silicon\", 
\"Tin\",\\\n \"Thallium\", \"Aluminium\", \"Indium\"]: \n mulliken_jaffe = mulliken_jaffe.contents[0]\n else: mulliken_jaffe = mulliken_jaffe.string\n mulliken_jaffe = mulliken_jaffe.split()[0]\n atom.mulliken_jaffe = float(mulliken_jaffe) if mulliken_jaffe != \"no\" else None\n\n attrs = { \"href\": \"../periodicity/electronegativity_allen/\" }\n allen = soup.findChild(name=\"a\", attrs=attrs).parent.parent.contents[-1].string\n allen = allen.split()[0]\n atom.allen = float(allen) if allen != \"no\" else None\n \n # atom sizes\n if not exists(join(\"elements\", name + \"_atom_sizes.html\")):\n file = urllib.urlopen(\"http://www.webelements.com/{0}/atom_sizes.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n else: \n with open(join(\"elements\", name + \"_atom_sizes.html\"), \"r\") as file:\n string = file.read()\n soup = BeautifulSoup(string) \n \n # atomic radius\n attrs = { \"href\": \"../periodicity/atomic_radius_empirical/\" }\n atomic_radius = soup.findChild(name=\"a\", attrs=attrs).parent.contents[-1].split()[1]\n if atomic_radius != \"no\":\n atom.atomic_radius = float(atomic_radius) * pq.picometre \n \n attrs = { \"href\": \"../periodicity/covalent_radius_2008/\" }\n covalent_radius = soup.findChild(name=\"a\", attrs=attrs).parent.contents[-1].split()[1]\n atom.covalent_radius = float(covalent_radius) * pq.picometre if covalent_radius != \"no\" else None\n\n attrs = { \"href\": \"../periodicity/radii_covalent_single/\" }\n single_bond_radius = soup.findChild(name=\"a\", attrs=attrs)\n if single_bond_radius is not None:\n single_bond_radius = single_bond_radius.parent.contents[-1].split()[1]\n if single_bond_radius != \"no\": \n atom.single_bond_radius = float(single_bond_radius) * pq.picometre\n\n attrs = { \"href\": \"../periodicity/radii_covalent_double/\" }\n double_bond_radius = soup.findChild(name=\"a\", attrs=attrs)\n if double_bond_radius is not None:\n double_bond_radius = double_bond_radius.parent.contents[-1].split()[1]\n if double_bond_radius != \"no\": \n atom.double_bond_radius = float(double_bond_radius) * pq.picometre\n\n attrs = { \"href\": \"../periodicity/radii_covalent_triple/\" }\n triple_bond_radius = soup.findChild(name=\"a\", attrs=attrs)\n if triple_bond_radius is not None:\n triple_bond_radius = triple_bond_radius.parent.contents[-1].split()[1]\n if triple_bond_radius != \"no\": \n atom.triple_bond_radius = float(triple_bond_radius) * pq.picometre\n\n attrs = { \"href\": \"../periodicity/van_der_waals_radius/\" }\n van_der_waals_radius = soup.findChild(name=\"a\", attrs=attrs)\n if van_der_waals_radius is not None:\n van_der_waals_radius = van_der_waals_radius.parent.contents[-1].split()[1]\n if van_der_waals_radius != \"no\": \n atom.van_der_waals_radius = float(van_der_waals_radius) * pq.picometre\n\n # thermochemistry\n if not exists(join(\"elements\", name + \"_thermochemistry.html\")):\n file = urllib.urlopen(\"http://www.webelements.com/{0}/thermochemistry.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n else: \n with open(join(\"elements\", name + \"_thermochemistry.html\"), \"r\") as file:\n string = file.read()\n soup = BeautifulSoup(string) \n \n attrs = { \"href\": \"../periodicity/enthalpy_fusion/\" }\n fusion = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n fusion = re.search(\":\\s*(?:about)?\\s*(\\S+)\", fusion)\n if fusion is not None and fusion.group(1) != \"no\":\n atom.fusion = float(fusion.group(1)) * pq.kilo * pq.J / pq.mol \n\n attrs = { \"href\": 
\"../periodicity/enthalpy_vaporisation/\" }\n vaporization = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n vaporization = re.search(\":\\s*(?:about)?\\s*(\\S+)\", vaporization)\n if vaporization is not None and vaporization.group(1) != \"no\":\n atom.vaporization = float(vaporization.group(1)) * pq.kilo * pq.J / pq.mol \n\n attrs = { \"href\": \"../periodicity/enthalpy_atomisation/\" }\n atomization = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n atomization = re.search(\":\\s*(?:about)?\\s*(\\S+)\", atomization)\n if atomization is not None and atomization.group(1) != \"no\":\n atom.atomization = float(atomization.group(1)) * pq.kilo * pq.J / pq.mol \n\n # physics\n if not exists(join(\"elements\", name + \"_physics.html\")):\n file = urllib.urlopen(\"http://www.webelements.com/{0}/physics.html\"\\\n .format(name.lower()))\n string = file.read()\n file.close()\n else: \n with open(join(\"elements\", name + \"_physics.html\"), \"r\") as file:\n string = file.read()\n soup = BeautifulSoup(string) \n\n attrs = { \"href\": \"../periodicity/melting_point/\" }\n melting_point = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n melting_point = re.search(\":\\s*(?:\\(white P\\)|about|maybe about)?\\s*(\\S+)\", melting_point)\n if melting_point is not None and melting_point.group(1) != \"no\":\n atom.melting_point = float(melting_point.group(1)) * pq.Kelvin\n\n attrs = { \"href\": \"../periodicity/boiling_point/\" }\n boiling_point = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n boiling_point = re.search(\":\\s*(?:about)?\\s*(\\S+)\", boiling_point)\n if boiling_point is not None and boiling_point.group(1) != \"no\":\n atom.boiling_point = float(boiling_point.group(1)) * pq.Kelvin\n\n attrs = { \"href\": \"../periodicity/critical_temperature/\" }\n critical_temperature = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n critical_temperature = re.search(\":\\s*(?:about)?\\s*(\\S+)\", critical_temperature)\n if critical_temperature is not None and critical_temperature.group(1) != \"no\":\n atom.critical_temperature = float(critical_temperature.group(1)) * pq.Kelvin\n\n attrs = { \"href\": \"../periodicity/thermal_conductivity/\" }\n thermal_conductivity = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n thermal_conductivity = re.search(\":\\s*(?:about)?\\s*(\\S+)\", thermal_conductivity)\n if thermal_conductivity is not None and thermal_conductivity.group(1) != \"no\":\n atom.thermal_conductivity = float(thermal_conductivity.group(1)) * pq.W / pq.m / pq.K\n\n attrs = { \"href\": \"../periodicity/coeff_thermal_expansion/\" }\n thermal_expansion = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n thermal_expansion = re.search(\":\\s*(?:about)?\\s*(\\S+)\", thermal_expansion)\n if thermal_expansion is not None and thermal_expansion.group(1) != \"no\":\n atom.thermal_expansion = float(thermal_expansion.group(1)) * pq.micro / pq.K\n\n attrs = { \"href\": \"../periodicity/density/\" }\n density = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n density = re.search(\":\\s*(?:about)?\\s*(\\S+)\", density)\n if density is not None and density.group(1) != \"no\":\n atom.density = float(density.group(1)) / 1000 * pq.g * pq.cm**3\n\n attrs = { \"href\": \"../periodicity/molar_volume/\" }\n molar_volume = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n molar_volume = re.search(\":\\s*(?:about)?\\s*(\\S+)\", molar_volume)\n if molar_volume is not None and molar_volume.group(1) != \"no\":\n atom.molar_volume = 
float(molar_volume.group(1)) * pq.cm**3 / pq.mol\n\n attrs = { \"href\": \"../periodicity/velocity_sound/\" }\n sound_velocity = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n sound_velocity = re.search(\":\\s*(?:about)?\\s*(\\S+)\", sound_velocity)\n if sound_velocity is not None and sound_velocity.group(1) != \"no\":\n atom.sound_velocity = float(sound_velocity.group(1)) * pq.m / pq.s\n\n attrs = { \"href\": \"../periodicity/youngs_modulus/\" }\n young_modulus = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n young_modulus = re.search(\":\\s*(?:about)?\\s*(\\S+)\", young_modulus)\n if young_modulus is not None and young_modulus.group(1) != \"no\":\n atom.young_modulus = float(young_modulus.group(1)) * pq.GPa\n\n attrs = { \"href\": \"../periodicity/rigidity_modulus/\" }\n rigidity_modulus = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n rigidity_modulus = re.search(\":\\s*(?:about)?\\s*(\\S+)\", rigidity_modulus)\n if rigidity_modulus is not None and rigidity_modulus.group(1) != \"no\":\n atom.rigidity_modulus = float(rigidity_modulus.group(1)) * pq.GPa\n \n attrs = { \"href\": \"../periodicity/bulk_modulus/\" }\n bulk_modulus = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n bulk_modulus = re.search(\":\\s*(?:about)?\\s*(\\S+)\", bulk_modulus)\n if bulk_modulus is not None and bulk_modulus.group(1) != \"no\":\n atom.bulk_modulus = float(bulk_modulus.group(1)) * pq.GPa\n \n attrs = { \"href\": \"../periodicity/poissons_ratio/\" }\n poisson_ratio = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n poisson_ratio = re.search(\":\\s*(?:about)?\\s*(\\S+)\", poisson_ratio)\n if poisson_ratio is not None and poisson_ratio.group(1) != \"no\":\n atom.poisson_ratio = float(poisson_ratio.group(1)) * pq.dimensionless\n \n attrs = { \"href\": \"../periodicity/electrical_resistivity/\" }\n electrical_resistivity = soup.findChild(name=\"a\", attrs=attrs).parent.prettify()\n electrical_resistivity = re.search(\":\\s*(?:about)?\\s*(\\d+(?:\\.\\d+)?)\", electrical_resistivity)\n if electrical_resistivity is not None and electrical_resistivity.group(1) not in [\"no\", \"&gt;\"]:\n atom.electrical_resistivity = float(electrical_resistivity.group(1)) * 1e-8 * pq.ohm * pq.m\n\n results[str(atom.symbol)] = atom\n \n if atom.symbol in orbital_radii:\n au = a0(\"A\") * pq.angstrom \n results[str(atom.symbol)].orbital_radii = tuple([u * au for u in orbital_radii[atom.symbol]])\n if atom.symbol in pettifor_numbers:\n results[str(atom.symbol)].pettifor = pettifor_numbers[atom.symbol]\n\n\n with open(filename, \"w\") as file:\n file.write(\"\\\"\\\"\\\" Definition of the elements. \\\"\\\"\\\"\\n\")\n file.write(\"\\nfrom numpy import array\\n\")\n file.write(\"\\nfrom quantities import *\\n\")\n file.write(\"\\nfrom . 
import Element\\n\")\n file.write(\"\\n__dir__ = ['elements', 'symbols']\\n\")\n file.write(\"\\nelements = \" + repr(results) + \"\\n\")\n keys = []\n for n in range(1, len(results)):\n for key, value in results.items():\n if value.atomic_number == n: keys.append(str(key))\n file.write(\"\\nsymbols = {0}\\n\".format(keys))", "def FromXML(cls, doc, element):\n return cls(INumberDef(element.text))", "def process_element(elements, element_name, node, element_type, xsd_uri, dialect, namespace, qualified=None,\n soapenc_uri = 'http://schemas.xmlsoap.org/soap/encoding/'):\n\n log.debug('Processing element %s %s' % (element_name, element_type))\n for tag in node:\n if tag.get_local_name() in ('annotation', 'documentation'):\n continue\n elif tag.get_local_name() in ('element', 'restriction'):\n log.debug('%s has no children! %s' % (element_name, tag))\n children = tag # element \"alias\"?\n alias = True\n elif tag.children():\n children = tag.children()\n alias = False\n else:\n log.debug('%s has no children! %s' % (element_name, tag))\n continue # TODO: abstract?\n d = OrderedDict()\n d.namespace = namespace\n d.qualified = qualified\n for e in children:\n t = e['type']\n if not t:\n t = e['base'] # complexContent (extension)!\n if not t:\n t = e['ref'] # reference to another element\n if not t:\n # \"anonymous\" elements had no type attribute but children\n if e['name'] and e.children():\n # create a type name to process the children\n t = \"%s_%s\" % (element_name, e['name']) \n c = e.children()\n et = c.get_local_name()\n c = c.children()\n process_element(elements, t, c, et, xsd_uri, dialect, namespace, qualified)\n else:\n t = 'anyType' # no type given!\n t = t.split(\":\")\n if len(t) > 1:\n ns, type_name = t\n else:\n ns, type_name = None, t[0]\n if element_name == type_name and not alias and len(children) > 1:\n continue # abort to prevent infinite recursion\n uri = ns and e.get_namespace_uri(ns) or xsd_uri\n if uri in (xsd_uri, soapenc_uri) and type_name != 'Array':\n # look for the type, None == any\n fn = REVERSE_TYPE_MAP.get(type_name, None)\n elif uri == soapenc_uri and type_name == 'Array':\n # arrays of simple types (look at the attribute tags):\n fn = []\n for a in e.children():\n for k, v in a[:]:\n if k.endswith(\":arrayType\"):\n type_name = v\n if \":\" in type_name:\n type_name = type_name[type_name.index(\":\")+1:]\n if \"[]\" in type_name:\n type_name = type_name[:type_name.index(\"[]\")] \n fn.append(REVERSE_TYPE_MAP.get(type_name, None))\n else:\n fn = None\n\n if not fn:\n # simple / complex type, postprocess later\n if ns:\n fn_namespace = uri # use the specified namespace\n else:\n fn_namespace = namespace # use parent namespace (default)\n for k, v in e[:]:\n if k.startswith(\"xmlns:\"):\n # get the namespace uri from the element\n fn_namespace = v \n fn = elements.setdefault(make_key(type_name, 'complexType', fn_namespace), OrderedDict())\n\n if e['maxOccurs'] == 'unbounded' or (uri == soapenc_uri and type_name == 'Array'):\n # it's an array... TODO: compound arrays? 
and check ns uri!\n if isinstance(fn, OrderedDict):\n if len(children) > 1 and dialect in ('jetty',):\n # Jetty style support\n # {'ClassName': [{'attr1': val1, 'attr2': val2}]\n fn.array = True\n else:\n # .NET style support (backward compatibility)\n # [{'ClassName': {'attr1': val1, 'attr2': val2}]\n d.array = True\n else:\n if dialect in ('jetty',):\n # scalar support [{'attr1': [val1]}]\n fn = [fn]\n else:\n d.array = True\n\n if (e['name'] is not None and not alias) or e['ref']:\n e_name = e['name'] or type_name # for refs, use the type name\n d[e_name] = fn\n else:\n log.debug('complexContent/simpleType/element %s = %s' % (element_name, type_name))\n d[None] = fn\n if e is not None and e.get_local_name() == 'extension' and e.children():\n # extend base element:\n process_element(elements, element_name, e.children(), element_type, xsd_uri, dialect, namespace, qualified)\n elements.setdefault(make_key(element_name, element_type, namespace), OrderedDict()).update(d)", "def convert_element_to_object(element):\n data_object = instantiate_from_string(element.tag)\n\n for attr_name, attr_value in object_attributes(data_object):\n if is_list_of_text_data(element, attr_name):\n loaded_data = load_xml_text_data(element, attr_name)\n elif attr_value is None: # A complex element that appears once.\n if element.find(attr_name) is None:\n # If this complex element is optional and,\n # in this case, omitted.\n loaded_data = None\n else:\n loaded_data = convert_element_to_object(\n element.find(attr_name))\n elif type(attr_value) is dict:\n loaded_data = load_attribute_dict(element, attr_name)\n elif type(attr_value) is list:\n loaded_data = load_child_objects(element, attr_name)\n elif is_element_attribute(element, attr_name):\n loaded_data = load_element_attribute(element, attr_name)\n else: # Optional attribute was omitted.\n loaded_data = attr_value\n\n setattr(data_object, attr_name, loaded_data)\n\n return data_object", "def XmlTypeNamespace(self) -> str:", "def __init__(self, node, declare):\n symbol.__init__(self, node, declare, \"enumEntry\", \"Enumeration Entry\")\n self.value = getTag(node, \"value\")\n # if there was a value then convert to a number\n if self.value == \"\":\n self.value = None\n else:\n if self.value[0:2] == \"0x\":\n self.value = int(self.value, 16)\n else:\n self.value = int(self.value)", "def visit_Instantiation(self, node):\n extend_ops = self.extend_ops\n name = self.name_gen.next()\n self.push_name(name)\n op_code = compile(node.name, self.filename, mode='eval')\n extend_ops([\n # btn_cls = eval('PushButton', toolkit, f_globals)\n # btn = btn_cls.__enaml_call__(None, toolkit)\n # When instantiating a Declaration, it is called without\n # identifiers, so that it creates it's own new identifiers\n # scope. 
This means that derived declarations share ids,\n # but the composed children have an isolated id space.\n (LOAD_FAST, 'eval_'),\n (LOAD_CONST, op_code),\n (LOAD_FAST, 'toolkit'),\n (LOAD_FAST, 'f_globals'),\n (CALL_FUNCTION, 0x0003),\n (LOAD_ATTR, '__enaml_call__'),\n (LOAD_CONST, None),\n (LOAD_FAST, 'toolkit'),\n (CALL_FUNCTION, 0x0002),\n (STORE_FAST, name),\n ])\n \n if node.identifier:\n extend_ops([\n (LOAD_FAST, name),\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n\n visit = self.visit\n for item in node.body:\n visit(item)\n \n self.pop_name()\n extend_ops([\n # foo.add_subcomponent(button)\n (LOAD_FAST, self.curr_name()),\n (LOAD_ATTR, 'add_subcomponent'),\n (LOAD_FAST, name),\n (CALL_FUNCTION, 0x0001),\n (POP_TOP, None),\n ])", "def __init__(self, node, declare, type, prettyType):\n symbol.__init__(self, node, declare, type, prettyType)\n self.inst = getOptionalTag(node, \"instance\")\n self.members = []\n # at this point we want the members node, but the function below returns\n # all nodes of type 'members' even if they are not a direct descendant\n # of this node\n #\n # hence we search through looking for the one that has the correct\n # parent\n members = getNode(node, \"members\")\n\n for mem in filter(lambda n: n.nodeType == n.ELEMENT_NODE, members.childNodes):\n self.members.append(declare(mem))", "def element_type(hint):\n hint = Const.unwrap(hint)\n if isinstance(hint, typehints.SequenceTypeConstraint):\n return hint.inner_type\n elif isinstance(hint, typehints.TupleHint.TupleConstraint):\n return typehints.Union[hint.tuple_types]\n elif isinstance(hint,\n typehints.UnionHint.UnionConstraint) and not hint.union_types:\n return hint\n return Any", "def _xml_ele_to_obj(cls, element):\n metadata_dict = {}\n metadata_dict[(element.attrib).get('key')] = element.text\n return MetadataItem(metadata_dict)", "def visit_Declaration(self, node):\n # This creates a function from the generated code ops then\n # wraps that function in an EnamlDeclaration.\n func_code = DeclarationCompiler.compile(node, self.filename)\n name = node.name\n self.code_ops.extend([\n (LOAD_CONST, func_code),\n (MAKE_FUNCTION, 0),\n (STORE_NAME, name),\n (LOAD_NAME, 'EnamlDeclaration'),\n (LOAD_NAME, name),\n (CALL_FUNCTION, 0x0001),\n (STORE_NAME, name),\n ])", "def _getTypeCode(self, parameters, literal=False):\r\n ofwhat = []\r\n for part in parameters:\r\n namespaceURI,localName = part.type\r\n\r\n if part.element_type:\r\n #global element\r\n element = self._wsdl.types[namespaceURI].elements[localName]\r\n tc = self._getElement(element, literal=literal, local=False, namespaceURI=namespaceURI)\r\n else:\r\n #local element\r\n name = part.name\r\n typeClass = self._getTypeClass(namespaceURI, localName)\r\n if not typeClass:\r\n tp = self._wsdl.types[namespaceURI].types[localName]\r\n tc = self._getType(tp, name, literal, local=True, namespaceURI=namespaceURI)\r\n else:\r\n tc = typeClass(name)\r\n ofwhat.append(tc)\r\n return ofwhat", "def shape_element(element):\n node = {}\n # you should process only 2 types of top level tags: \"node\" and \"way\"\n if element.tag == \"node\" or element.tag == \"way\" :\n for key in element.attrib.keys():\n val = element.attrib[key]\n node[\"type\"] = element.tag\n\n # deal with top-level tags \n node = process_toptags(key,val, node)\n \n # Begin iterating over subtags\n node = process_subtags(element, node)\n \n for tag in element.iter(\"nd\"):\n if not \"node_refs\" in node.keys():\n node[\"node_refs\"] = []\n 
node_refs = node[\"node_refs\"]\n node_refs.append(tag.attrib[\"ref\"])\n node[\"node_refs\"] = node_refs\n\n return node\n else:\n return None", "def from_element(element, index=0, **kwargs):\n from gblearn.elements import pissnnl\n P = pissnnl(element, **kwargs)[index]\n decomposer = SOAPDecomposer(1, **kwargs)\n return SOAPVector(P, decomposer)", "def FromXML(cls, doc, element):\n return cls()", "def __init__(self, node, declare, type, prettyType=\"\"):\n self.name = getTag(node, \"name\")\n self.info = getTag(node, \"info\")\n self.comment = comment(node, declare)\n self.type = type\n self.prettyType = prettyType\n if prettyType == \"\":\n self.prettyType = type\n m = hash()\n m.update(self.name)\n m.update(self.info)\n m.update(self.type)\n self.link = \"a\"+m.hexdigest()", "def element_from_string(string):\n return ElementTree.fromstring(string)", "def shape_element(element):\n node = {}\n if element.tag == \"node\" or element.tag == \"way\" :\n lat = str(element.get(\"lat\"))\n lon = str(element.get(\"lon\"))\n try:\n node[\"pos\"] = [float(lat),float(lon)]\n except:\n pass\n created = {}\n created[\"changeset\"] = element.get(\"changeset\")\n created[\"user\"] = element.get(\"user\")\n created[\"version\"] = element.get(\"version\")\n created[\"uid\"] = element.get(\"uid\")\n created[\"timestamp\"] = element.get(\"timestamp\")\n node[\"created\"] = created\n node[\"visible\"] = element.get(\"visible\")\n node[\"type\"] = element.tag\n node[\"id\"] = element.get(\"id\")\n \n ##Parse address elements\n address = {}\n for subelement in element.iter(\"tag\"):\n k_element = subelement.get(\"k\")\n v_element = subelement.get(\"v\")\n if not problemchars.match(k_element):\n if k_element.startswith(\"addr:\"):\n if is_street_name(subelement):\n v_element = update_name(v_element,mapping)\n k_elements = k_element.split(\":\")\n if(len(k_elements) < 3):\n address[k_elements[1]] = v_element\n else:\n node[k_element] = v_element\n if(bool(address)):\n node[\"address\"] = address\n \n if element.tag == \"way\":\n node_refs = []\n for subelement in element.iter(\"nd\"):\n node_refs.append(subelement.get(\"ref\"))\n node[\"node_refs\"] = node_refs\n \n return node\n else:\n return None", "def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\r\n problem_chars=PROBLEMCHARS, default_tag_type='regular'):\r\n\r\n node_atts = {}\r\n way_atts = {}\r\n way_nodes = []\r\n tags = [] # Handle secondary tags the same way for both node and way elements\r\n\r\n if element.tag == 'node': #fill dictionary with k/v pairs from NODE_FIELDS\r\n for i in node_attr_fields:\r\n node_atts[i] = element.attrib[i]\r\n\r\n if element.tag == 'way':\r\n for i in way_attr_fields:\r\n way_atts[i] = element.attrib[i]\r\n\r\n for tag in element.iter(\"tag\"): #loop through tags looking for problem values\r\n dic = {}\r\n attributes = tag.attrib\r\n if tag.attrib['k'] == \"addr:street\":\r\n tag.attrib['v'] = update_name_street(tag.attrib['v'], streetmapping)\r\n elif tag.attrib['k'] == \"addr:city\":\r\n tag.attrib['v'] = update_name_city(tag.attrib['v'], citymapping)\r\n elif tag.attrib['k'] == \"addr:postcode\":\r\n tag.attrib['v'] = update_zips(tag.attrib['v'])\r\n elif tag.attrib['k'] == \"cuisine\":\r\n tag.attrib['v'] = update_name_cuisine(tag.attrib['v'], cuisinemapping)\r\n \r\n if problem_chars.search(tag.attrib['k']):\r\n continue\r\n\r\n if element.tag == 'node': #add node id for attributes\r\n dic['id'] = node_atts['id']\r\n else:\r\n dic['id'] = way_atts['id'] #add way id for 
attributes\r\n\r\n dic['value'] = attributes['v'] #value of key for each type\r\n\r\n colon_k=LOWER_COLON.search(tag.attrib['k'])\r\n \r\n if colon_k:\r\n #print colon_k.group(0)\r\n #print tag.attrib['k']\r\n dic['key'],dic['type'] = right_key(tag.attrib['k']) #call function to split at colon\r\n else:\r\n dic['key'] = attributes['k'] #assign regular that there was no colon problem\r\n dic['type'] = 'regular'\r\n\r\n tags.append(dic)\r\n\r\n if element.tag == 'way':\r\n position = 0\r\n for nd in element.iter(\"nd\"): #loop through nd child tags numbering them\r\n way_node_dic = {}\r\n way_node_dic['id'] = way_atts['id']\r\n way_node_dic['node_id'] = nd.attrib['ref']\r\n way_node_dic['position'] = position\r\n position = position + 1\r\n way_nodes.append(way_node_dic)\r\n \r\n \r\n \r\n if element.tag == 'node': #process the above for node tags for final formatting\r\n return {'node': node_atts, 'node_tags': tags}\r\n\r\n elif element.tag == 'way': #process the above for way tags for final formatting\r\n return {'way': way_atts, 'way_nodes': way_nodes, 'way_tags': tags}", "def visit_Typedef(self, node):\n return str_node(node)", "def _element_constructor_(self, x):\n if not isinstance(x, (RealNumber, tuple)):\n if isinstance(x, ComplexDoubleElement):\n return ComplexNumber(self, x.real(), x.imag())\n elif isinstance(x, str):\n # TODO: this is probably not the best and most\n # efficient way to do this. -- Martin Albrecht\n return ComplexNumber(self,\n sage_eval(x.replace(' ',''), locals={\"I\":self.gen(),\"i\":self.gen()}))\n\n late_import()\n if isinstance(x, NumberFieldElement_quadratic):\n if isinstance(x.parent(), NumberField_quadratic) and list(x.parent().polynomial()) == [1, 0, 1]:\n (re, im) = list(x)\n return ComplexNumber(self, re, im)\n\n try:\n return self(x.sage())\n except (AttributeError, TypeError):\n pass\n try:\n return x._complex_mpfr_field_( self )\n except AttributeError:\n pass\n return ComplexNumber(self, x)", "def createElement(self, order, octant, index, weights):\n return self.element", "def an_element(self):\n return self.a_realization().an_element()", "def get_xsd_annotation(elem):\n try:\n return elem[0] if elem[0].tag == XSD_ANNOTATION else None\n except (TypeError, IndexError):\n return", "def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN", "def from_xml_node(cls, xml_node):\n global_ = get_xml_text_value(xml_node, xml_tags.Elements.GLOBAL)\n interfaces = Interface.from_xml_node(get_xml_node(xml_node, xml_tags.Elements.INTERFACES))\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n return cls(global_, interfaces, name)", "def type(self) -> global___Type:", "def buildTypedefXmlDeclaration(self, title=0, offset=1, size=None):\n dec = \"<Paragraph>\"\n if len(self.name) == 0:\n dec = \"Aliases an anonymous enumeration.\"\n else:\n dec = \"Aliases an enumeration, called: 
'\"+self.name+\"'.\"\n dec += \"</Paragraph>\\n\"\n dec += \"<Paragraph>\"+self.info+\"</Paragraph>\\n\"\n dec += \"<table border=1 cellpadding=5>\\n\"\n for entry in self.entries:\n dec += entry.buildFullInfoDeclaration()\n dec += \"</table>\\n\"\n return dec", "def getTypeCode(self):\n return _libsbml.Compartment_getTypeCode(self)", "def declaration(self) -> str:\n if self._declaration is None:\n # First try to utilize the clang comment's version as it is assumed\n # to be the more correct.\n self._declaration = self.get_soup_declaration()\n\n if self._declaration is None:\n # soup failed so fall back to manual parsing\n self._declaration = self.get_parsed_declaration()\n\n return self._declaration", "def _from_c_repr(c_repr):\n # We create a dummy module with a global variable of the requested type,\n # parse that module, and return the type of the global variable.\n # Include stdint.h to recognize the intX_t typedefs.\n module = parse(\"\"\"\n #include <stdint.h>\n\n {} a;\n \"\"\".format(c_repr))\n return module.global_vars['a'].type", "def adt_object_to_element_name(adt_object):\n\n objtype = adt_object.objtype\n return f'{objtype.xmlnamespace.name}:{objtype.xmlname}'" ]
[ "0.6112495", "0.5760717", "0.54377365", "0.5435102", "0.54039854", "0.5333338", "0.5304633", "0.5193657", "0.51740164", "0.51256275", "0.51102036", "0.50124407", "0.5008194", "0.4974934", "0.49698183", "0.49652553", "0.49299234", "0.4924845", "0.49117178", "0.48976466", "0.48903218", "0.48894882", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.48892117", "0.4883074", "0.48784518", "0.48745582", "0.48611283", "0.48608068", "0.48451626", "0.4825897", "0.48221424", "0.4818608", "0.4818608", "0.4818608", "0.48130143", "0.48130143", "0.48130143", "0.48116386", "0.4806164", "0.4779893", "0.47758543", "0.47622415", "0.47604117", "0.4742492", "0.47411624", "0.47364846", "0.47298282", "0.47288847", "0.4698432", "0.46924904", "0.46891984", "0.4672621", "0.46663022", "0.4663079", "0.46628767", "0.46595788", "0.46578568", "0.4651461", "0.465072", "0.46242252", "0.46181604", "0.46060547", "0.45938647", "0.45737866", "0.45655188", "0.45619223", "0.45563364", "0.45499745", "0.4549386", "0.45388976", "0.45387092", "0.45379734", "0.45256013", "0.4519598", "0.451578", "0.4510584", "0.45103684", "0.450968", "0.44723126", "0.4466703", "0.4465629", "0.44592297", "0.44563624", "0.44561508", "0.44551235", "0.4431092", "0.4430564", "0.4429701", "0.4417571", "0.44132167" ]
0.6733242
0
Returns a typecode instance representing the passed in type and name. tp XMLSchema.TypeDefinition instance name element name literal literal encoding? local is locally defined? namespaceURI namespace
def _getType(self, tp, name, literal, local, namespaceURI): ofwhat = [] if not (tp.isDefinition() and tp.isComplex()): raise EvaluateException, 'only supporting complexType definition' elif tp.content.isComplex(): if hasattr(tp.content, 'derivation') and tp.content.derivation.isRestriction(): derived = tp.content.derivation typeClass = self._getTypeClass(*derived.getAttribute('base')) if typeClass == TC.Array: attrs = derived.attr_content[0].attributes[WSDL.BASE] prefix, localName = SplitQName(attrs['arrayType']) nsuri = derived.attr_content[0].getXMLNS(prefix=prefix) localName = localName.split('[')[0] simpleTypeClass = self._getTypeClass(namespaceURI=nsuri, localName=localName) if simpleTypeClass: ofwhat = simpleTypeClass() else: tp = self._wsdl.types[nsuri].types[localName] ofwhat = self._getType(tp=tp, name=None, literal=literal, local=True, namespaceURI=nsuri) else: raise EvaluateException, 'only support soapenc:Array restrictions' return typeClass(atype=name, ofwhat=ofwhat, pname=name, childNames='item') else: raise EvaluateException, 'complexContent only supported for soapenc:Array derivations' elif tp.content.isModelGroup(): modelGroup = tp.content for item in modelGroup.content: ofwhat.append(self._getElement(item, literal=literal, local=True)) tc = TC.Struct(pyclass=None, ofwhat=ofwhat, pname=name) if not local: self._globalElement(tc, namespaceURI=namespaceURI, literal=literal) return tc raise EvaluateException, 'only supporting complexType w/ model group, or soapenc:Array restriction'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def type(name):", "def get_typecode(self, name):\n return self.codes['type_codes'][name]", "def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name", "def _PyType_Lookup(space, type, w_name):\n w_type = from_ref(space, rffi.cast(PyObject, type))\n assert isinstance(w_type, W_TypeObject)\n\n if not space.isinstance_w(w_name, space.w_text):\n return None\n name = space.text_w(w_name)\n w_obj = w_type.lookup(name)\n # this assumes that w_obj is not dynamically created, but will stay alive\n # until w_type is modified or dies. Assuming this, we return a borrowed ref\n return w_obj", "def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type", "def get_type_by_name(self, name):\n raise NotImplementedError()", "def mainTypeName(type_name):\n return mainRender(type_name)", "def getTypeFromName(self, *args):\n return _libsbml.ASTBasePlugin_getTypeFromName(self, *args)", "def typeToName(type: int) -> unicode:\n ...", "def name_type(self):\n return self.tag(\"name_type\")", "def _create_type(self, keyword_or_identifier, **kwargs):\n name = keyword_or_identifier\n if isinstance(name, Identifier):\n return self._idl_type_factory.reference_type(name, **kwargs)\n elif isinstance(name, str):\n return self._idl_type_factory.simple_type(name, **kwargs)\n else:\n assert False", "def type(self) -> global___Type:", "def create_type(name):\n\n new_type = Type(name=name)\n db.session.add(new_type)\n db.session.commit()\n return new_type", "def convert_type(self, name, type):\n\t\t#\t\tprint 'Called with name = %s and type = %s' %(name, type)\n\t\tname = ''.join(name.split())\n\t\ttype = ''.join(type.split())\n\n\t\tif re.match(r'\\w+', type): #It's a concrete type\n\t\t\treturn self.TYPES_DICT.get(type,type) + ' ' + name\n\n\t\tarrow = type.rfind('->')\n\t\tassert arrow != -1, \"If it's not a primitive, it must be a function\"\n\t\tparams, return_type = type[:arrow], type[arrow+2:]\n\t\tassert params[0] == '(' and params[-1] == ')'\n\t\tparams = params[1:-1]\n\n\t\tparams_tokenized = Lexer(params).get_tokens()\n\t\tparam_list = self.split_params(params_tokenized)\n\t\tcpp_params = map(lambda n: self.convert_type('', n), param_list)\n\t\treturn_type = self.convert_type('', return_type)\n\t\treturn return_type + '(*' + name + ')(' + ','.join(cpp_params) + ')'", "def get_type_data(name):\n name = name.upper()\n try:\n return {\n 'authority': 'birdland.mit.edu',\n 'namespace': 'coordinate format',\n 'identifier': name,\n 'domain': 'Coordinate Format Types',\n 'display_name': JEFFS_COORDINATE_FORMAT_TYPES[name] + ' Coordinate Format Type',\n 'display_label': JEFFS_COORDINATE_FORMAT_TYPES[name],\n 'description': ('The type for the ' +\n JEFFS_COORDINATE_FORMAT_TYPES[name] +\n ' Geographic coordinate format.')\n }\n except KeyError:\n raise NotFound('CoordinateFormat Type: ' + name)", "def getEnumerationTypeXmlStub (typeName): \n\tsimpleType = createSchemaElement(\"simpleType\")\n\tsimpleType.setAttribute (\"name\",typeName)\n\trestriction = createSchemaElement(\"restriction\")\n\trestriction.setAttribute (\"base\", qp(\"token\"))\n\tsimpleType.appendChild (restriction)\n\treturn simpleType", "def XrefTypeName(typecode):\n assert typecode in _ref_types, \"unknown reference type %d\" % typecode\n return _ref_types[typecode]", "def cpp_type(self, t):\n t = self.canon(t)\n if isinstance(t, basestring):\n if t in self.base_types:\n return self.cpp_types[t]\n # must be tuple below this line\n tlen = len(t)\n if 2 == tlen:\n if 0 == t[1]:\n return self.cpp_type(t[0])\n 
elif self.isrefinement(t[1]):\n if t[1][0] in self.cpp_types:\n subtype = self.cpp_types[t[1][0]]\n if callable(subtype):\n subtype = subtype(t[1], self)\n return subtype\n else:\n return self.cpp_type(t[0])\n else:\n last = '[{0}]'.format(t[-1]) if isinstance(t[-1], int) else t[-1]\n return self._cpp_type_add_predicate(self.cpp_type(t[0]), last)\n elif 3 <= tlen:\n assert t[0] in self.template_types\n assert len(t) == len(self.template_types[t[0]]) + 2\n template_name = self.cpp_types[t[0]]\n assert template_name is not NotImplemented\n template_filling = []\n kinds = self.argument_kinds.get(t, ((Arg.NONE,),)*(tlen-2))\n for x, kind in zip(t[1:-1], kinds):\n if kind is Arg.LIT:\n x = self.cpp_literal(x)\n elif kind is Arg.TYPE:\n x = self.cpp_type(x)\n elif kind is Arg.VAR:\n x = self._cpp_var_name(x)\n elif isinstance(x, bool):\n x = self.cpp_types[x]\n elif isinstance(x, Number):\n x = str(x)\n else:\n try:\n x = self.cpp_type(x) # Guess it is a type?\n except TypeError:\n x = self._cpp_var_name(x) # Guess it is a variable\n template_filling.append(x)\n cppt = '{0}< {1} >'.format(template_name, ', '.join(template_filling))\n if 0 != t[-1]:\n last = '[{0}]'.format(t[-1]) if isinstance(t[-1], int) else t[-1]\n cppt = self._cpp_type_add_predicate(cppt, last)\n return cppt", "def _getTypeClass(self, namespaceURI, localName):\r\n bti = BaseTypeInterpreter()\r\n simpleTypeClass = bti.get_typeclass(localName, namespaceURI)\r\n return simpleTypeClass", "def _ConstructType(self, type_name, type_contents, filepath, require_guid):\n\n description = ''\n parents = None\n local_field_names = None\n opt_local_field_names = None\n is_abstract = False\n allow_undefined_fields = False\n is_canonical = False\n guid = None\n\n expected_keys = set([\n 'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'guid',\n 'is_canonical', 'allow_undefined_fields'\n ])\n\n if 'description' in type_contents:\n description = type_contents['description']\n if 'implements' in type_contents:\n parents = type_contents['implements']\n if 'uses' in type_contents:\n local_field_names = type_contents['uses']\n if 'opt_uses' in type_contents:\n opt_local_field_names = type_contents['opt_uses']\n if 'is_abstract' in type_contents:\n is_abstract = type_contents['is_abstract']\n if 'allow_undefined_fields' in type_contents:\n allow_undefined_fields = type_contents['allow_undefined_fields']\n if 'is_canonical' in type_contents:\n is_canonical = type_contents['is_canonical']\n if 'guid' in type_contents:\n guid = type_contents['guid']\n\n # Generate tuples to represent each field\n fq_lfn = []\n if local_field_names:\n self._ConstructField(local_field_names, False, fq_lfn)\n if opt_local_field_names:\n self._ConstructField(opt_local_field_names, True, fq_lfn)\n\n entity_type = EntityType(\n filepath=filepath,\n typename=type_name,\n description=description,\n parents=parents,\n local_field_tuples=fq_lfn,\n is_abstract=is_abstract,\n allow_undefined_fields=allow_undefined_fields,\n inherited_fields_expanded=False,\n is_canonical=is_canonical,\n guid=guid,\n require_guid=require_guid,\n namespace=self.local_namespace)\n\n # Add errors to type if there's anything extra in the block. 
We add to the\n # entity type because an extra key here is likely a typo in a real key name\n # that would result in information being lost from the type.\n for key in type_contents:\n if key not in expected_keys:\n entity_type.AddFinding(\n findings_lib.UnrecognizedKeyError(key, entity_type.file_context))\n\n return entity_type", "def typedef(self, name: str) -> str:\n return camel_case(rstrip(lstrip(name, self.strip_prefix.lower() + \"_\"), '_t'))", "def XmlTypeName(self) -> str:", "def gettype(space, w_obj):\n return space.newstr(space.TYPENAMES[w_obj.tp])", "def __call__(self, node):\n # we assume we know what this type is and raise and catch the key error\n # exception if we don't\n try:\n s = self.lut[node.tagName](node, self)\n except KeyError, e:\n raise DeclareError(e[0], node)\n\n # save this, for use with typedef's later\n self.symbols[s.getType()+s.getName()] = s\n\n return s", "def _lex_type_lookup_func(self, name):\n is_type = self._is_type_in_scope(name)", "def map_opcode_types(type_name):\n # first check if the name is a register name\n if type_name in REGISTER_NAMES or type_name in REGISTER_TYPES:\n return Register(type_name)\n\n # check if it is an immediate value -- represente by 'imm'\n if type_name[:3] == 'imm':\n size = int(type_name[3:]) if len(type_name) > 3 else None\n return Immediate(type_name, size)\n\n if type_name[:5] == \"moffs\":\n # memory offset, regard as memory\n size = int(type_name[5:]) if len(type_name) > 5 else None\n return Memory(type_name, size)\n\n # if first value is m, refers to memory type\n if type_name[0] == \"m\":\n try:\n size = int(type_name[1:]) if len(type_name) > 1 else None\n return Memory(type_name, size)\n except ValueError:\n # error while converting last part, unknown type\n return Unknown(type_name)\n\n if type_name[:3] == \"rel\":\n size = int(type_name[3:]) if len(type_name) > 1 else None\n return Relative(type_name, size)\n\n return Unknown(type_name)", "def type_name(self):\n return self._type_name", "def map_type(name):\n\n rv = MAPPINGS.get(name, None)\n if rv is not None:\n return rv\n\n name = name.replace(\"&\", \"*\")\n\n if name.startswith(\"const \"):\n rv = map_type(name[6:])\n\n elif name.endswith(\"const\"):\n rv = map_type(name[:-5])\n\n elif name.endswith(\" *\"):\n mapped = map_type(name[:-2])\n rv = f\"POINTER({mapped})\"\n\n elif name.endswith(\" **\"):\n mapped = map_type(name[:-1])\n rv = f\"POINTER({mapped})\"\n\n elif name.endswith(\"]\"):\n m = re.match(r\"(.*) \\[(\\d+)\\]\", name)\n if m is None:\n raise Exception(f\"Couldn't map type {name}\")\n\n mapped = map_type(m.group(1))\n count = m.group(2)\n\n rv = f\"({mapped} * {count})\"\n\n elif \"(*)\" in name:\n return \"c_void_p\"\n\n else:\n raise Exception(f\"Couldn't map type {name!r}\")\n\n MAPPINGS[name] = rv\n return rv", "def get_type_data(name):\n name = name.upper()\n try:\n return {\n 'authority': 'okapia.net',\n 'namespace': 'string match types',\n 'identifier': name,\n 'domain': 'String Match Types',\n 'display_name': STRING_MATCH_TYPES[name] + ' String Match Type',\n 'display_label': STRING_MATCH_TYPES[name],\n 'description': ('The string match type for the ' +\n STRING_MATCH_TYPES[name])\n }\n except KeyError:\n raise NotFound('String Type: ' + name)", "def _generate_type(self, n, modifiers=[], emit_declname = True):\n\t\ttyp = type(n)\n\n\t\t#~ print(n, modifiers)\n\n\t\tif typ == pycparser.c_ast.TypeDecl:\n\t\t\ts = ''\n\t\t\tif n.quals: s += ' '.join(n.quals) + ' '\n\t\t\ts += self.visit(n.type)\n\n\t\t\t# Local variables & parameter 
renaming.\n\t\t\t#\n\t\t\t# Variable name substitution only applies to local variables or parameters names within function prototypes\n\t\t\t# (thus, global variables and function names need to be excluded)\n\t\t\t#\n\t\t\t# case 1: level-0 function parameters (no remanimg for nested parameters)\n\t\t\t# case 2: local variable declaration (thus excluding functions, global vars, struct-enum-union fields, nested parameters)\n\t\t\t#\n\t\t\tif self.__visitingParam == 1: # case 1\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case I)\") % (self.__currentFunction,n.declname)\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.paramprefix + self.__currentFunction + '_'+self.inlineInfix #S:\n\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\telse: \n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)]\n\t\t\t\tn.declname = (self.paramprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname) if n.declname else '' #S:\n\t\t\t\n\t\t\telif (self.__visitingParam == 0 and # case 2\n\t\t\t\t\tself.__visitFuncDef == 0 and\n\t\t\t\t\tn.declname not in self.Parser.funcName and\n\t\t\t\t\t#n.declname not in self.Parser.varNames[''] and\n\t\t\t\t\tself.__currentFunction != '' and\n\t\t\t\t\tself.__visitStructUnionEnum == 0):\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case II)\") % (self.__currentFunction,n.declname)\n\t\t\t\t#S: env.local, the followin two lines are replaced with the following if\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_'\n\t\t\t\t#n.declname = self.prefix + self.__currentFunction + '_' + n.declname if n.declname else ''\n\t\t\t\tif self.__init: \n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse: \n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.prefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\telse:\n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.nondetprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t#print n.declname\n\t\t\t\t#print self.newIDs\n\t\n\n\t\t\tnstr = n.declname if n.declname else ''\n\n\t\t\t# Resolve modifiers.\n\t\t\t# Wrap in parens to distinguish pointer to array and pointer to\n\t\t\t# function 
syntax.\n\t\t\t#\n\t\t\tfor i, modifier in enumerate(modifiers):\n\t\t\t\tif isinstance(modifier, pycparser.c_ast.ArrayDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '[' + self.visit(modifier.dim) + ']'\n\t\t\t\telif isinstance(modifier, pycparser.c_ast.FuncDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '(' + self.visit(modifier.args) + ')'\n\t\t\t\telif isinstance(modifier, pycparser.c_ast.PtrDecl):\n\t\t\t\t\tif modifier.quals:\n\t\t\t\t\t\tnstr = '* %s %s' % (' '.join(modifier.quals), nstr)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnstr = '*' + nstr\n\t\t\tif nstr: s += ' ' + nstr\n\t\t\treturn s\n\t\telif typ == pycparser.c_ast.Decl:\n\t\t\treturn self._generate_decl(n.type)\n\t\telif typ == pycparser.c_ast.Typename:\n\t\t\treturn self._generate_type(n.type)\n\t\telif typ == pycparser.c_ast.IdentifierType:\n\t\t\treturn ' '.join(n.names) + ' '\n\t\telif typ in (pycparser.c_ast.ArrayDecl, pycparser.c_ast.PtrDecl, pycparser.c_ast.FuncDecl):\n\t\t\treturn self._generate_type(n.type, modifiers + [n])\n\t\telse:\n\t\t\treturn self.visit(n)\n\n\n\n\t\tdef visit_Compound(self, n):\n\t\t\tself.__visitingCompound += 1\n\t\t\ts = super(self.__class__, self).visit_Compound(n)\n\t\t\tfor key in self.newIDs: #S: remove pairs that have been added in this compound\n\t\t\t\tstack = self.newIDs[key] \n\t\t\t\tif stack and stack[-1][1] == self.__visitingCompound: \n\t\t\t\t\tstack.pop()\n\t\t\tself.__visitingCompound -= 1\n\t\t\treturn s", "def resolve_type(name):\n types = {\n 'string': StringProperty,\n 'name': NameProperty,\n 'date': DateProperty,\n 'country': CountryProperty,\n 'address': AddressProperty,\n 'phone': PhoneProperty,\n 'email': EmailProperty,\n 'url': URLProperty,\n 'uri': URLProperty,\n 'identifier': IdentiferProperty\n }\n type_ = types.get(name.strip().lower())\n if type_ is None:\n raise TypeError(\"No such type: %s\" % name)\n return type_", "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def mk_typ(self, name, kind):\n # (str, ty.Kind) -> ty.TypeVar\n\n typ = ty.TypeVar(name, kind)\n self.type_param_scopes[0].appendleft((name, typ))\n return typ", "def test_type_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntype type\")\n assert bb.programtype[\"name\"] == \"type\"", "def __init__(\n self,\n data_type,\n name=None,\n namespace=None,\n names=None,\n other_props=None,\n ):\n assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)\n self._avro_name = names.get_name(name=name, namespace=namespace)\n\n super(NamedSchema, self).__init__(data_type, other_props)\n\n names.register(self)\n\n self._props['name'] = self.name\n if self.namespace:\n self._props['namespace'] = self.namespace", "def pyxb_get_type_name(obj_pyxb):\n return pyxb_get_namespace_name(obj_pyxb).split('}')[-1]", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def typeName (self, typecode):\n if typecode == qmf2.SCHEMA_DATA_VOID: return \"void\"\n elif typecode == qmf2.SCHEMA_DATA_BOOL: return \"bool\"\n elif typecode == qmf2.SCHEMA_DATA_INT: return \"int\"\n elif typecode == qmf2.SCHEMA_DATA_FLOAT: return \"float\"\n elif typecode == qmf2.SCHEMA_DATA_STRING: return \"string\"\n 
elif typecode == qmf2.SCHEMA_DATA_MAP: return \"map\"\n elif typecode == qmf2.SCHEMA_DATA_LIST: return \"list\"\n elif typecode == qmf2.SCHEMA_DATA_UUID: return \"uuid\"\n else:\n raise ValueError (\"Invalid type code: %s\" % str(typecode))", "def from_string(cls, name: str) -> Enum:", "def get_type(self, name):\n pkg_name = name.split('.')[0]\n type_name = name.split('.')[1]\n for t in self.types:\n if t.package.name == pkg_name and t.name == type_name:\n return t\n return None", "def name_for(element_defn: JSON, type_defn: JSON) -> str:\n return element_defn.path.replace('[x]', PathElement.type_name(type_defn))", "def create(_type, *args, **kwargs):\n # noinspection PyUnresolvedReferences\n return IExplorer.registry[_type.lower()](*args, **kwargs)", "def __init__(self, name, types, reflection, year):\r\n self.name = name\r\n self.type = types\r\n self.reflection = reflection\r\n self.year = year", "def visit_Typedef(self, node):\n return str_node(node)", "def name(self):\r\n if self._name_map is None:\r\n self._name_map = {}\r\n for key,value in TypeKind.__dict__.items():\r\n if isinstance(value,TypeKind):\r\n self._name_map[value] = key\r\n return self._name_map[self]", "def type_name(self) -> str: # pragma: no cover\n return repr_type(self.type_obj)", "def __init__(self, inst, class_type):\n\t\tself.type = str(class_type)[7:]\n\t\tself.type = self.type[:-1]\n\t\tself.inst = inst\n\t\treturn", "def name(self) -> str:\n return self.type_data.name", "def element_type(self) -> global___Type:", "def getNameFromType(self, *args):\n return _libsbml.ASTBasePlugin_getNameFromType(self, *args)", "def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))", "def genType(self, typeinfo, name, alias):\n OutputGenerator.genType(self, typeinfo, name, alias)\n\n typeElem = typeinfo.elem\n # If the type is a struct type, traverse the embedded <member> tags\n # generating a structure. Otherwise, emit the tag text.\n category = typeElem.get('category')\n\n # Add a typeCategory{} entry for the category of this type.\n self.addName(self.typeCategory, name, category)\n\n if category in ('struct', 'union'):\n self.genStruct(typeinfo, name, alias)\n else:\n if alias:\n # Add name -> alias mapping\n self.addName(self.alias, name, alias)\n\n # Always emit an alias (?!)\n count = 1\n\n # May want to only emit full type definition when not an alias?\n else:\n # Extract the type name\n # (from self.genOpts). Copy other text through unchanged.\n # If the resulting text is an empty string, do not emit it.\n count = len(noneStr(typeElem.text))\n for elem in typeElem:\n count += len(noneStr(elem.text)) + len(noneStr(elem.tail))\n\n if count > 0:\n if category == 'bitmask':\n requiredEnum = typeElem.get('requires')\n self.addName(self.flags, name, requiredEnum)\n\n # This happens when the Flags type is defined, but no\n # FlagBits are defined yet.\n if requiredEnum is not None:\n self.addMapping(name, requiredEnum)\n elif category == 'enum':\n # This case does not seem to come up. 
It nominally would\n # result from\n # <type name=\"Something\" category=\"enum\"/>,\n # but the output generator does not emit them directly.\n self.logMsg('warn', 'ScriptOutputGenerator::genType: invalid \\'enum\\' category for name:', name)\n elif category == 'funcpointer':\n self.funcpointers[name] = None\n elif category == 'handle':\n self.handles[name] = None\n elif category == 'define':\n self.defines[name] = None\n elif category == 'basetype':\n # Do not add an entry for base types that are not API types\n # e.g. an API Bool type gets an entry, uint32_t does not\n if self.apiName(name):\n self.basetypes[name] = None\n self.addName(self.typeCategory, name, 'basetype')\n else:\n self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name, 'category:', category)\n else:\n self.logMsg('diag', 'ScriptOutputGenerator::genType: unprocessed type:', name)", "def create_typedef(*args):\n return _ida_hexrays.create_typedef(*args)", "def get_type_doc_name(type):\n name = type.name\n if type.is_simple:\n return _get_simple_type_mapping(name)\n elif type.is_enum:\n return '{0}.{1}'.format(get_package_name(name), get_enum_name(name))\n elif type.is_complex:\n return '{0}.{1}'.format(get_package_name(name), get_class_name(name))", "def new_entity_type(name, client=default):\n data = {\"name\": name}\n return raw.create(\"entity-types\", data, client=client)", "def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)", "def type_name_to_type(name):\n if name in SIMPLE_TYPES:\n return SIMPLE_TYPES[name]\n elif name in PROXY_TYPES:\n return PROXY_TYPES[name]\n return None", "def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN", "def type_name(type_defn: JSON) -> str:\n rval = type_defn.get('code', '').replace('*', '').replace('@', '')\n return rval", "def gen_type_string(self, node):\n return self._gen_table[node.node_type()](self, node)", "def make_key(element_name, element_type, namespace):\n # only distinguish 'element' vs other types\n if element_type in ('complexType', 'simpleType'):\n eltype = 'complexType'\n else:\n eltype = element_type\n if eltype not in ('element', 'complexType', 'simpleType'):\n raise RuntimeError(\"Unknown element type %s = %s\" % (element_name, eltype))\n return (element_name, eltype, namespace)", "def __init__(self, base):\n if isinstance(base, str):\n self._name = base\n else:\n raise TypeError(NAME_CREATE_ERROR)", "def map_latencydata_types(type_name):\n\n if type_name in ['0', '1']:\n return Unknown(type_name)\n\n # cl is register that is used for certain instructions\n if type_name 
== 'cl':\n return Register(type_name)\n\n # TODO make this its own type?\n if type_name == \"stack pointer\":\n return Register(type_name)\n\n if type_name == \"[r+s*x]\" or type_name == \"[r+s*y]\":\n return Unknown(type_name)\n\n if type_name[:1] == 'r':\n if type_name[-1] == 'l' or type_name[-1] == 'h':\n # h, l refer to high, low? get rid of these and continnue as normally\n type_name = type_name[:-1] # mistake in the document? get rid of the trailing l?\n size = int(type_name[1:]) if len(type_name) > 1 else None\n return Register(type_name, size)\n\n # vector registers (I think)\n if type_name in [\"xmm\", \"mmx\", \"ymm\", \"mmy\"]:\n return Register(type_name)\n\n if type_name == 'i':\n return Immediate(type_name)\n if type_name == \"v\":\n return Register(type_name)\n\n if type_name[:3] == \"xmm\":\n return Register(type_name)\n\n if type_name[:2] == 'mm':\n size = int(type_name[2:]) if len(type_name) > 2 else None\n return Memory(type_name, size)\n\n if type_name[0] == 'm':\n size = int(type_name[1:]) if len(type_name) > 1 else None\n return Memory(type_name, size)\n\n if type_name == \"x\":\n return Register(type_name)\n\n if type_name == \"y\":\n return Register(type_name)\n\n if type_name == \"near\" or type_name == \"short\":\n return Unknown(type_name)\n raise ValueError(f\"uknown type {type_name}\")", "def get_sample_instance_for_type(type_name):\n try:\n type_ = getattr(ExtraTypeDefinitions, type_name)\n return known_python_type_typename_samplevalues[type_][1]\n except:\n try:\n return known_python_type_typename_samplevalues[getattr(types, type_name)][1]\n except:\n return known_python_type_typename_samplevalues[__builtins__[type_name]][1]", "def get_type_from_str(type_str):\n try:\n # Assume the current language to be C/C++ and make a try.\n return gdb.parse_and_eval(\"(%s *)0\" % type_str).type.target()\n except RuntimeError:\n # If assumption of current language to be C/C++ was wrong, then\n # lookup the type using the API.\n try:\n return gdb.lookup_type(type_str)\n except RuntimeError:\n return None", "def get_type(self, type_name):\n return type_cache.get_type_cache().get_type(type_name, self.target)", "def _type_name(cls, manual_name):\r\n cf_name = ''\r\n if manual_name:\r\n cf_name = manual_name.lower()\r\n else:\r\n camelcase = re.compile(r'([a-z])([A-Z])')\r\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\r\n \r\n cf_name += ccase(cls.__name__)\r\n cf_name = cf_name.lower()\r\n if cls.__use_module_name__:\r\n cf_name = cls.__module__ + '_{}'.format(cf_name)\r\n return cf_name", "def to_jsontype(type):\n typename = type.__name__ if type else None\n renames = {'str': 'string', 'int': 'integer', 'bool': 'bool'}\n if typename in renames:\n typename = renames[typename]\n return typename", "def str_to_type(name_type):\n if name_type == 'float' or name_type == 'Float':\n return float\n if name_type == 'bool':\n return bool\n if name_type == 'int':\n return lambda x: int(float(x))\n if name_type == 'list':\n return ast.literal_eval\n if name_type == 'date':\n return lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ')\n if name_type == 'str':\n return str\n\n \n return None", "def typeof(inst):\n return type(inst).__name__", "def type_name(attr_type: AttrType) -> str:\n return attr_type.native_name or class_name(attr_type.name)", "def get_type_name(type):\n name = type.name\n if type.is_simple:\n return _get_simple_type_mapping(name)\n elif type.is_enum:\n return _get_simple_type_mapping('str')\n elif 
type.is_complex:\n return get_class_name(name)", "def get_type(value):\n\n # Evaluated string statement for type()\n var_type = str(eval(\"type({})\".format(value)))\n\n # Remove unwanted portions of string\n var_type = var_type.replace(\"<class '\", \"\").split(\"'>\", 1)[0]\n\n # Return processed string\n return var_type", "def __init__(self, name, elem_type=None, opt=False):\n if name not in TYPES:\n raise Exception('unexpected type: %r' % name)\n if elem_type is not None:\n assert(name in COMPOUND)\n self.name = name\n self.elem_type = elem_type\n self.opt = opt", "def getTypeCode(self):\n return _libsbml.ReplacedBy_getTypeCode(self)", "def type_name(self):\n return self.TYPE_NAMES[self.type]", "def typeof(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.get(CN('meta:value-type'), 'string')\n raise KeyError(name)", "def Instance(self) -> TypeManager:", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def create_ontic_type(name: str, schema: (dict, Schema)) -> OnticType:\n if name is None or name == '':\n raise ValueError('The string \"name\" argument is required.')\n if schema is None:\n raise ValueError('The schema dictionary is required.')\n if not isinstance(schema, dict):\n raise ValueError('The schema must be a dict or SchemaType.')\n\n ontic_type = type(name, (OnticType,), dict())\n\n if not isinstance(schema, Schema):\n schema = Schema(schema)\n\n ontic_type.ONTIC_SCHEMA = schema\n\n return ontic_type", "def _get_type_name(type_):\n # type: (type) -> str\n name = repr(type_)\n if name.startswith(\"<\"):\n name = getattr(type_, \"__qualname__\", getattr(type_, \"__name__\", \"\"))\n return name.rsplit(\".\", 1)[-1] or repr(type_)", "def test_vulkan_basetype_type_declaration() -> None:\n\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <type category=\"basetype\">typedef <type>uint32_t</type> <name>VkSampleMask</name>;</type>\"\"\"\n\n basetype = basetype_parser.parse(ET.fromstring(xml))\n\n assert isinstance(basetype, internal_types.VulkanBaseType)\n assert basetype.typename == \"VkSampleMask\"\n assert basetype.basetype == \"uint32_t\"", "def create_new_type():\n string = \"I'm a string\"\n print(type(string))\n \"\"\"When passed 3 arguments, type() acts like a dynamic 'class' statement. 
It returns a new class object (see\n metaclassses notes)\n \"\"\"\n thing = type(\"SuperCoolClass\", (), {})\n thing.random = 4.556\n print(type(thing))\n print(vars(thing))\n print(thing)", "def getTypeCode(self):\n return _libsbml.FbcAnd_getTypeCode(self)", "def __init__(self, raw_type: type):\n self.raw_type = raw_type\n self.name = raw_type.__name__\n self.qualname = raw_type.__qualname__\n self.module = raw_type.__module__\n self.full_name = TypeInfo.to_full_name(raw_type)\n self.hash = hash(self.full_name)\n self.is_abstract = inspect.isabstract(raw_type)\n # TODO(fk) store more information on attributes\n self.instance_attributes: OrderedSet[str] = OrderedSet()\n self.attributes: OrderedSet[str] = OrderedSet()\n\n # TODO(fk) properly implement generics!\n # For now we just store the number of generic parameters for set, dict and list.\n self.num_hardcoded_generic_parameters: int | None = (\n 2 if raw_type is dict else 1 if raw_type in (set, list) else None\n )", "def get_value_type (self, name):\n value, type = wrapped (win32api.RegQueryValueEx, self.pyobject (), name)\n return type", "def get_type(self, wn):\n return self.chunk_type[self.chunk_map[wn]]", "def make_type(\n schema: Schema,\n name: str,\n module: Optional[str] = None,\n key_filename: Optional[str] = None,\n) -> Type[ConfigType]:\n result = type(\n name, (ConfigType,), {\"__schema__\": schema, \"__key_filename__\": key_filename}\n )\n # This is copied from the namedtuple method. We try to set the module of the new\n # class to the calling module.\n if module is None:\n try:\n module = sys._getframe(1).f_globals.get(\"__name__\", \"__main__\")\n except (AttributeError, ValueError): # pragma: no cover\n pass\n if module is not None:\n result.__module__ = module\n\n return result", "def _parse_type(type_name):\n tokens = findall(\"[^<>,]+|<|>|,\", type_name)\n\n def parse(tokens, tree):\n tree = list(tree)\n # It is an error to parse nothing\n if len(tokens) == 0:\n raise TypeNameError(type_name)\n first_token, *tail = tokens\n\n # The first token should be a name\n if first_token in {\"<\", \">\", \",\"}:\n raise TypeNameError(type_name)\n\n # Base case\n if len(tail) == 0:\n tree.append((first_token, ()))\n return tuple(tree), []\n next_token, *tail = tail\n\n # No subtypes\n if next_token == \",\":\n tree.append((first_token, ()))\n\n # Parse subtypes\n if next_token == \"<\":\n # Extract just the subtype tokens and parse them\n stack = [\"<\"]\n subtype_tokens = list()\n remaining_tokens = list()\n for t in tail:\n if len(stack) == 0:\n remaining_tokens.append(t)\n continue\n if t == \"<\":\n stack.append(t)\n elif t == \">\":\n stack.pop()\n subtype_tokens.append(t)\n if len(stack) > 0 or subtype_tokens[-1] != \">\":\n raise TypeNameError(type_name)\n subtypes, remaining = parse(subtype_tokens[:-1], [])\n # Parsing should consume all subtype tokens\n if len(remaining) != 0:\n raise TypeNameError(type_name)\n tree.append((first_token, subtypes))\n # Finish if all tokens are consumed\n if len(remaining_tokens) == 0:\n return tuple(tree), []\n next_token, *tail = remaining_tokens\n\n # If the next token is a comma, parse next\n if next_token == \",\":\n return parse(tail, tree)\n\n # None of the rules match, error\n raise TypeNameError(type_name)\n\n # There should only be one item at the root of the tree\n try:\n (parse_tree,) = parse(tokens, [])[0]\n except ValueError:\n raise TypeNameError(type_name)\n return parse_tree", "def __init__(self, name, code):\n self.name_in_source = name\n if isinstance(name, 
text_type):\n strip_symbols_re = compile_re('-|_')\n self.canonical_name = strip_symbols_re.sub('', name.lower())\n else:\n self.canonical_name = name\n self.code = code", "def type_to_sc_type(type_, prefix='sc'):\n return '{}{}'.format(prefix.upper(), type_.title())", "def getTypeCode(self):\n return _libsbml.SpeciesTypeInstance_getTypeCode(self)", "def make_anafaze_type(struct_def, name):\n return type(name, (object,), \n dict(\n to_python = partial(unpack, *[struct_def]),\n from_python = partial(pack, *[struct_def]),\n byte_size = calcsize(struct_def),\n )\n )()", "def makeTypeName (self, country_name):\n\t\t#capitalize the segments (but leave first segment lowercase)\n\t\tsegments = map (lambda x: x.capitalize(), country_name.split(\" \"))\n\t\tsegments[0] = segments[0].lower()\n\t\tname = \"\".join(segments) + \"Type\"\n\t\treturn unicode(name)", "def local_type(verifield, type_name):\n from polyglot.pyapi.meta import retrieve_schema_table_fields\n from polyglot.pyapi.instance import create_instance_validators\n from polyglot.models.schema import Instance\n (tenant_id, schema_id, table_id) = type_name.split(\"::\")\n fields = retrieve_schema_table_fields(tenant_id, schema_id, table_id)\n validators = Instance._validations\n validators['instance_data'] = create_instance_validators(fields)\n instance = Instance(**instance)\n instance.validate(validators)\n instance._validations = validators\n return not((hasattr(instance, 'validation_errors') \n and instance.validation_errors) \\\n or instance.instance_data.get('validation_errors', {}))", "def _get_type_name(self, st_type):\n if st_type <= 244: return 'str' + str(st_type)\n return self._type_names[st_type]", "def _type_def_helper(name, args, env: Env) -> typing.Tuple[Basic, typing.Dict[str, Undecided]]:\n\n new_basic = make_basic(name)\n env.set_named_type(name, new_basic)\n _ty_args = OrderedDict((arg, Undecided()) for arg in args)\n env.undecided_types.update(_ty_args)\n return new_basic, _ty_args", "def getTypeCode(self):\n return _libsbml.Input_getTypeCode(self)", "def to_type_name(self, text) -> str:\n return util.to_snake_case(self.split_to_body_and_ext(text)[0]).capitalize()", "def XmlTypeNamespace(self) -> str:" ]
[ "0.68378854", "0.6094486", "0.60735965", "0.5959993", "0.58422214", "0.57260895", "0.57252353", "0.5668879", "0.564833", "0.564122", "0.5636486", "0.56226254", "0.56203324", "0.5611236", "0.5606567", "0.55945", "0.5588911", "0.5588712", "0.55883664", "0.5574209", "0.55277264", "0.55228144", "0.54909337", "0.5486284", "0.548115", "0.54771245", "0.5457554", "0.54565096", "0.54516476", "0.544018", "0.5414875", "0.5397563", "0.5359341", "0.5358102", "0.535762", "0.5355063", "0.5343608", "0.53252727", "0.5311055", "0.52988386", "0.52829725", "0.5256982", "0.5255442", "0.5233199", "0.5225345", "0.5220105", "0.52156174", "0.5211656", "0.51975083", "0.51820546", "0.517677", "0.51611125", "0.515236", "0.5149711", "0.51410353", "0.51371264", "0.5136109", "0.51132685", "0.51045954", "0.51008123", "0.5099363", "0.5087142", "0.50821376", "0.50818086", "0.5078628", "0.5078562", "0.50766474", "0.50658005", "0.50648975", "0.5064129", "0.50636303", "0.5059679", "0.5057301", "0.5053615", "0.50419474", "0.5035432", "0.5031211", "0.50305235", "0.50268954", "0.5025117", "0.50234133", "0.50225437", "0.50148815", "0.5006509", "0.500571", "0.50021183", "0.50017715", "0.49997765", "0.49997482", "0.49971178", "0.49867812", "0.4972433", "0.49701715", "0.49667093", "0.4964962", "0.49645695", "0.4964163", "0.49598193", "0.4959577", "0.49541542" ]
0.5198417
48
Returns a typecode class representing the type we are looking for. localName name of the type we are looking for. namespaceURI defining XMLSchema targetNamespace.
def _getTypeClass(self, namespaceURI, localName): bti = BaseTypeInterpreter() simpleTypeClass = bti.get_typeclass(localName, namespaceURI) return simpleTypeClass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def find_class(self, class_name: str) -> Type:\n pass", "def get_class(self, class_name, output_type=\"PythonClass\"):\n uris = self.cls_converter.get_uri(class_name)\n if type(uris) == list:\n warnings.warn(\"Found more than 1 classes defined within schema using label {}\".format(class_name))\n return [SchemaClass(_item, self, output_type) for _item in uris]\n else:\n return SchemaClass(class_name, self, output_type)", "def XmlTypeNamespace(self) -> str:", "def type(self) -> Type[ClassType]:\n return self._type", "def pyxb_get_type_name(obj_pyxb):\n return pyxb_get_namespace_name(obj_pyxb).split('}')[-1]", "def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN", "def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))", "def get_type(self, name):\n pkg_name = name.split('.')[0]\n type_name = name.split('.')[1]\n for t in self.types:\n if t.package.name == pkg_name and t.name == type_name:\n return t\n return None", "def qname(type_):\n # type: (type) -> str\n\n return \"{0.__module__}.{0.__qualname__}\".format(type_)", "def XmlTypeName(self) -> str:", "def type(self) -> global___Type:", "def load_cls(node):\n return node.get_attr(Type).load()", "def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]", "def GetEntityType(self, namespace_name, typename):\n if namespace_name not in self.type_namespaces_map:\n return None\n return self.type_namespaces_map[namespace_name].GetType(typename)", "def _declaring_class(obj):\n name = _qualname(obj)\n return name[:name.rfind('.')]", "def get_type(node):\n # Assume there is only one type inferred\n # If there are multiple types inferred we have to\n # choose which one to pick\n try:\n if len(node.inferred()) > 0:\n ty_infer = node.inferred()[0]\n if isinstance(ty_infer, Module):\n ty = ty_infer.name\n elif isinstance(ty_infer, ClassDef):\n ty = ty_infer.name\n elif isinstance(ty_infer, type(Uninferable)):\n ty = None\n else:\n ty = ty_infer.pytype().replace(\"builtins.\", \"\").lstrip(\".\")\n else:\n ty = None\n except Exception as err:\n ty = None\n\n return ty", "def typ(rxn_class):\n return rxn_class[0]", "def create_class_instance(element, element_id, doc_id):\n xsi_type = get_xsi_type(element)\n element_class = XSI_TYPE_CLASSES[xsi_type]\n return element_class.from_etree(element)", "def get_type_from_string(cls_path: str) -> Type:\n module_name, class_name = cls_path.rsplit(\".\", 1)\n return getattr(import_module(module_name), class_name)", "def XrefTypeName(typecode):\n assert typecode in _ref_types, \"unknown reference type %d\" % typecode\n return _ref_types[typecode]", "def GetNamespace(self, namespace_name):\n return 
self.type_namespaces_map.get(namespace_name, None)", "def get_typ(self, refobj):\n enum = cmds.getAttr(\"%s.type\" % refobj)\n try:\n return JB_ReftrackNode.types[enum]\n except IndexError:\n raise ValueError(\"The type on the node %s could not be associated with an available type: %s\" %\n (refobj, JB_ReftrackNode.types))", "def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)", "def _PyType_Lookup(space, type, w_name):\n w_type = from_ref(space, rffi.cast(PyObject, type))\n assert isinstance(w_type, W_TypeObject)\n\n if not space.isinstance_w(w_name, space.w_text):\n return None\n name = space.text_w(w_name)\n w_obj = w_type.lookup(name)\n # this assumes that w_obj is not dynamically created, but will stay alive\n # until w_type is modified or dies. Assuming this, we return a borrowed ref\n return w_obj", "def element_type(self) -> global___Type:", "def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)", "def get_handle_class(handle_class_name: str) -> Type[\"Handle\"]:\n klass = get_type_registry().parse_type_name(handle_class_name)\n return klass", "def get_type(self, type_name):\n return type_cache.get_type_cache().get_type(type_name, self.target)", "def type(cls):\n return cls.__name__", "def getTypeFromName(self, *args):\n return _libsbml.ASTBasePlugin_getTypeFromName(self, *args)", "def get_schema_cls() -> t.Any:\n return None", "def return_type(self) -> global___Type:", "def getTypeCode(self):\n return _libsbml.SBMLDocument_getTypeCode(self)", "def getTypeReference(self):\r\n return self.implementationTypeRef", "def get_type_from_str(type_str):\n try:\n # Assume the current language to be C/C++ and make a try.\n return gdb.parse_and_eval(\"(%s *)0\" % type_str).type.target()\n except RuntimeError:\n # If assumption of current language to be C/C++ was wrong, then\n # lookup the type using the API.\n try:\n return gdb.lookup_type(type_str)\n except RuntimeError:\n return None", "def get_type(self):\n return self._type_obj", "def get_class(self, name):\n return self.host.get_class(name)", "def _get_value_type_class():\n global _value_type_class\n if not _value_type_class:\n from energyquantified.data import ValueType\n _value_type_class = ValueType\n return _value_type_class", "def node_cls(self):\n return resolve_resource(self._node_cls)", "def getTypeCode(self):\n return _libsbml.SBase_getTypeCode(self)", "def C(classname):\n return objc.objc_getClass(_utf8(classname))", "def get_class_for(self, elem):\r\n\t\treturn self.__tag_to_cls.get(elem.tag, self.__default_cls)", "def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass", "def __call__(self, cls_or_name: Union[str, Type]) -> Type[DTSchema]:\n if isinstance(cls_or_name, type):\n n = cls_or_name.__name__\n else:\n n = cls_or_name\n if hasattr(self, n):\n return getattr(self, n)\n raise ValueError(f\"Could not find type {cls_or_name}\")", "def get_xsd_type(self, item):\n if not self.xsd_types or isinstance(self.xsd_types, AbstractSchemaProxy):\n return\n elif isinstance(item, str):\n xsd_type = self.xsd_types.get(item)\n elif isinstance(item, AttributeNode):\n xsd_type = self.xsd_types.get(item[0])\n else:\n xsd_type = self.xsd_types.get(item.tag)\n\n if not xsd_type:\n return\n elif not 
isinstance(xsd_type, list):\n return xsd_type\n elif isinstance(item, AttributeNode):\n for x in xsd_type:\n if x.is_valid(item[1]):\n return x\n elif not isinstance(item, str):\n for x in xsd_type:\n if x.is_simple():\n if x.is_valid(item.text):\n return x\n elif x.is_valid(item):\n return x\n\n return xsd_type[0]", "def _get_class(self, name):\n return self._hw_mm.namespaces[\"hw_devices\"][name]", "def getTypeCode(self):\n return _libsbml.ReplacedBy_getTypeCode(self)", "def get_class(self, name):\n raise NotImplementedError", "def type(cls):", "def getTypeCode(self):\n return _libsbml.SBaseRef_getTypeCode(self)", "def get_type(self, ):\n return self.attrs.get(self.AttributeNames.TYPE, None)", "def specific_class(self):\n\n specific_type = ContentType.objects.get_for_id(self.specific_type_id)\n return specific_type.model_class()", "def element_type(self):\r\n result = conf.lib.clang_getElementType(self)\r\n if result.kind == TypeKind.INVALID:\r\n raise Exception('Element type not available on this type.')\r\n\r\n return result", "def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name", "def get_typecode(self, name):\n return self.codes['type_codes'][name]", "def type_name(self):\n return self._type_name", "def get_class(self):\n return devices.get_class(self.type)", "def getDecoderClass(typ):\n for cls in (CentralDirEntry, LocalFileHeader, EndOfCentralDir, DataDescriptor, Zip64EndOfDir, Zip64EndOfDirLocator, ExtraEntry, SpannedArchive, ArchiveSignature):\n if cls.MagicNumber == typ:\n return cls", "def get_class_name(self):\n\n if \"class\" in self._root.attrib:\n return self._root.attrib['class']\n else:\n return self._root.tag", "def xsType(self):\n return self.xsID[0]", "def type(name):", "def test_returns_class(self):\n assert type is simple_class().__class__", "def getTypeCode(self):\n return _libsbml.Output_getTypeCode(self)", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def type(self):\n if self._type is None:\n self._type = None if len(self) == 0 else self.top.__class__\n return self._type", "def ClassForModel(cls, namespace, name):\n try:\n return cls.schema[namespace][name]\n except KeyError:\n raise BadModelError(\"There is no Model with name: %s\" % name)", "def _spark_type_class(self) -> Type[DataType]:", "def _spark_type_class(self) -> Type[DataType]:", "def _spark_type_class(self) -> Type[DataType]:", "def 
_spark_type_class(self) -> Type[DataType]:", "def _spark_type_class(self) -> Type[DataType]:", "def get_element_type(cls):\r\n return cls._type_name(cls.element_type)", "def schema(self):\n schema_el = self.root.xpath(\n '/wsdl:definitions/wsdl:types/xsd:schema', namespaces=NS_MAP,\n )[0]\n return element_as_tree(schema_el)" ]
[ "0.6334454", "0.63214386", "0.621163", "0.60691124", "0.5908769", "0.58564675", "0.5796145", "0.5774467", "0.5729281", "0.5662747", "0.5652824", "0.55890405", "0.5530385", "0.5529409", "0.5522239", "0.5496328", "0.54915804", "0.54823256", "0.54342365", "0.5419367", "0.5410473", "0.53962886", "0.5385955", "0.5384445", "0.5382902", "0.5379116", "0.5373598", "0.5356813", "0.5352603", "0.5352074", "0.53407496", "0.5338804", "0.5336503", "0.5331502", "0.5327753", "0.5323818", "0.53235364", "0.53201985", "0.5304521", "0.53043854", "0.52817035", "0.5281319", "0.52717507", "0.5263307", "0.5248219", "0.5246166", "0.5238851", "0.5237311", "0.52371657", "0.52283275", "0.5227235", "0.52211964", "0.52182555", "0.5213215", "0.52041006", "0.5200475", "0.51957345", "0.519023", "0.5186255", "0.5185666", "0.51841134", "0.51675755", "0.5165735", "0.5159934", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51594377", "0.51370203", "0.5133603", "0.5133396", "0.5133396", "0.5133396", "0.5133396", "0.5133396", "0.51264775", "0.51198477" ]
0.7862565
0
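The positive document in the row above resolves an XML Schema simple type by handing (localName, namespaceURI) to a BaseTypeInterpreter. The interpreter itself is not included in the row, so the sketch below only illustrates that lookup pattern under the assumption that it boils down to a table keyed on (targetNamespace, localName); the SimpleTypeRegistry class and the placeholder typecode classes are hypothetical stand-ins, not the source library's API.

# Illustrative sketch only: a minimal (namespaceURI, localName) -> typecode-class lookup
# in the spirit of _getTypeClass above. All names here are hypothetical stand-ins.
XSD_NS = "http://www.w3.org/2001/XMLSchema"

class String: pass
class Integer: pass
class Boolean: pass

class SimpleTypeRegistry:
    """Hypothetical stand-in mapping (targetNamespace, localName) to a typecode class."""

    _TABLE = {
        (XSD_NS, "string"): String,
        (XSD_NS, "int"): Integer,
        (XSD_NS, "boolean"): Boolean,
    }

    def get_typeclass(self, localName, namespaceURI):
        # Unknown types come back as None rather than raising, so the caller
        # can fall back to complex-type handling.
        return self._TABLE.get((namespaceURI, localName))

registry = SimpleTypeRegistry()
assert registry.get_typeclass("int", XSD_NS) is Integer
assert registry.get_typeclass("unknownType", XSD_NS) is None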
extracts the features used to calculate neural style cost gram_style_features a list of gram matrices calculated from the style layer outputs of the style image content_feature the content layer output of the content image
def generate_features(self): content_input = self.content_image * 255 style_input = self.style_image * 255 preprocessed_content = tf.keras.applications.vgg19.preprocess_input( content_input) preprocessed_style = tf.keras.applications.vgg19.preprocess_input( style_input) outputs_content = self.model(preprocessed_content) outputs_style = self.model(preprocessed_style) num_style_layers = tf.size(self.style_layers) style_outputs, content_outputs = ( outputs_style[:num_style_layers], outputs_content[num_style_layers:]) style_outputs = [self.gram_matrix( style_output)for style_output in style_outputs] self.gram_style_features = style_outputs self.content_feature = content_outputs
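The query for this row centres on gram matrices of the style-layer outputs, and the document above computes them through a gram_matrix method that the excerpt does not show. A minimal NumPy sketch of the usual computation follows: channel-to-channel correlations of an (H, W, C) activation; dividing by the number of spatial locations is one common normalisation and is an assumption here, not taken from the row.

# Sketch of the gram-matrix step referenced by generate_features above.
import numpy as np

def gram_matrix(feature_map):
    """feature_map: one style layer's activation, shape (H, W, C)."""
    h, w, c = feature_map.shape
    flat = feature_map.reshape(h * w, c)   # one row per spatial location
    gram = flat.T @ flat                   # (C, C) channel correlations
    return gram / (h * w)                  # normalise by number of locations

style_layer_output = np.random.rand(32, 32, 64).astype(np.float32)
print(gram_matrix(style_layer_output).shape)   # (64, 64)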
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_style_image_features(image):\n ### START CODE HERE ###\n # preprocess the image using the given preprocessing function\n preprocessed_style_image = preprocess_image(image)\n\n # get the outputs from the inception model that you created using inception_model()\n outputs = inception(preprocessed_style_image)\n\n # Get just the style feature layers (exclude the content layer)\n style_outputs = outputs[:NUM_STYLE_LAYERS]\n\n # for each style layer, calculate the gram matrix for that layer and store these results in a list\n gram_style_features = [gram_matrix(style_layer) for style_layer in style_outputs]\n ### END CODE HERE ###\n return gram_style_features", "def get_feature_representations(model, content_img, style_img):\n # Load our images in \n content = load_and_process_img(content_img)\n style = load_and_process_img(style_img)\n\n # batch compute content and style features\n style_outputs = model(style)\n content_outputs = model(content)\n\n # Get the style and content feature representations from our model\n style_features = [style_layer[0] for style_layer in style_outputs[:num_style_layers]]\n content_features = [content_layer[0] for content_layer in content_outputs[num_style_layers:]]\n\n return style_features, content_features", "def _get_feature_representations(self, content_and_style_class):\n # Load our images in\n content_image = content_and_style_class.processed_content_image\n style_image = content_and_style_class.processed_style_image\n\n # batch compute content and style features\n style_outputs = self.model(style_image)\n content_outputs = self.model(content_image)\n\n # Get the style and content feature representations from our model\n style_features = [style_layer[0]\n for style_layer in style_outputs[:self.num_style_layers]]\n content_features = [content_layer[0]\n for content_layer in content_outputs[self.num_style_layers:]]\n return style_features, content_features", "def get_content_image_features(image):\n\n ### START CODE HERE ###\n # preprocess the image\n preprocessed_content_image = preprocess_image(image)\n \n # get the outputs from the inception model\n outputs = inception(preprocessed_content_image)\n\n # get the content layer of the outputs\n content_outputs = outputs[:NUM_CONTENT_LAYERS]\n\n ### END CODE HERE ###\n return content_outputs", "def all_feature_extractor(imgpath):\r\n\r\n image = cv2.imread(imgpath)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Extracting Gabor Features\r\n feature_dict = gabor_feature_extractor(image)\r\n\r\n feature_dict['Original'] = image\r\n\r\n entropy_img = entropy(image, disk(1))\r\n feature_dict['Entropy'] = entropy_img\r\n\r\n gaussian3_img = nd.gaussian_filter(image, sigma=3)\r\n feature_dict['Gaussian3'] = gaussian3_img\r\n\r\n gaussian7_img = nd.gaussian_filter(image, sigma=7)\r\n feature_dict['Gaussian7'] = gaussian7_img\r\n\r\n sobel_img = sobel(image)\r\n feature_dict['Sobel'] = sobel_img\r\n\r\n canny_edge_img = cv2.Canny(image, 100, 200)\r\n feature_dict['Canny'] = canny_edge_img\r\n\r\n robert_edge_img = roberts(image)\r\n feature_dict['Robert'] = robert_edge_img\r\n\r\n scharr_edge = scharr(image)\r\n feature_dict['Scharr'] = scharr_edge\r\n\r\n prewitt_edge = prewitt(image)\r\n feature_dict['Prewitt'] = prewitt_edge\r\n\r\n median_img = nd.median_filter(image, size=3)\r\n feature_dict['Median'] = median_img\r\n\r\n variance_img = nd.generic_filter(image, np.var, size=3)\r\n feature_dict['Variance'] = variance_img\r\n\r\n return feature_dict", "def extract_features(self):\n 
self.extract_features_static()\n self.extract_features_dynamic()", "def get_features(image, features, gparams, fg_size):\n import numpy as np\n from get_hog import get_hog\n\n if len(image.shape) == 2:\n image = image.reshape([image.shape[0], image.shape[1], 1])\n\n if len(image.shape) == 3:\n [im_height, im_width, num_im_chan] = image.shape\n num_images = 1\n else:\n [im_height, im_width, num_im_chan, num_images] = image.shape\n\n tot_feature_dim = features['fparams']['nDim']\n\n if fg_size is None or (not fg_size is True):\n if gparams['cell_size'] == -1:\n fg = get_hog(image, features['fparams'], gparams)\n fg_size = fg.shape\n else:\n fg_size = [np.floor(im_height / gparams['cell_size']), np.floor(im_width / gparams['cell_size'])]\n\n feature_image = get_hog(image, features['fparams'], gparams)\n if num_images == 1:\n feature_image = feature_image.reshape(feature_image.shape[0], feature_image.shape[1],\n feature_image.shape[2], 1)\n\n feature_pixels = np.zeros([int(fg_size[0]), int(fg_size[1]), tot_feature_dim, num_images])\n feature_pixels[:, :, 0::, :] = feature_image\n support_sz = [im_height, im_width]\n\n return feature_pixels, support_sz", "def extractFeatures(image, feature_list):\n # for multiple features or color features\n #feat_vec = np.array([])\n \n # sift has 128D\n feat_vec = np.empty((0,128))\n n_channels = (image.shape[2] if len(image.shape)==3 else 1)\n \n #img_f32 = image.astype(np.float32)\n\n for feature in feature_list:\n if (feature.strip().lower() == 'dsift'):\n print \"computing dsift (dense rootSift) features\"\n dense = cv2.FeatureDetector_create(\"Dense\")\n sift = cv2.SIFT()\n if n_channels == 1:\n kp = dense.detect(image[:,:])\n # compute kp descriptors\n _,des = sift.compute(image[:,:],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n \n feat_vec = np.vstack((feat_vec, des))\n else:\n for channel in xrange(n_channels):\n kp = dense.detect(image[:,:,channel])\n _,des = sift.compute(image[:,:,channel],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n\n feat_vec = np.vstack((feat_vec, des))\n \n# if (feature.strip().lower() == 'color'):\n# print \"computing color features\"\n# # scale from 0-255 between 0 and 1\n# if args.scale == 1:\n# img_f32 /= 255.\n# \n# f_tmp = img_f32.flatten()\n# feat_vec = np.append(feat_vec, f_tmp)\n else:\n raise Exception(\"Method '%s' is not implemented!\"%(feature)) \n \n return feat_vec", "def extract_features(self, preprocessed_inputs, init_extraction=False):\n if init_extraction:\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('MobilenetV1',\n reuse=self._reuse_weights) as scope:\n _, image_features = mobilenet_v1.mobilenet_v1_base(\n preprocessed_inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n feature_head = image_features['Conv2d_13_pointwise']\n feature_head = slim.conv2d(\n feature_head,\n 512, [3,3],\n stride=1,\n padding='SAME',\n scope='Conv2d_Append_1x1_256'\n )\n feature_head = tf.nn.avg_pool(feature_head, strides=[1,1,1,1], ksize=[1,4,4,1],\n padding='VALID', 
)\n return feature_head\n else:\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n\n bottomup_features_names = [ 'Conv2d_11_pointwise', 'Conv2d_13_pointwise']\n num_appended_layers = 0\n #appended_channel_num = [512, 256, 256, 256]\n appended_channel_num = [512]\n\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('MobilenetV1',\n reuse=self._reuse_weights) as scope:\n _, image_features = mobilenet_v1.mobilenet_v1_base(\n preprocessed_inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n\n topdown_features = self._topdown_feature_maps(\n image_features,\n bottomup_features_names=bottomup_features_names,\n num_appended_layers = num_appended_layers,\n appended_channel_num = appended_channel_num)\n return topdown_features.values()", "def getFeatures(c):\n\n\n feature_list = []\n lc_rc_list = []\n w1 = c.getStack(0)\n w2 = c.getStack(1)\n w3 = c.getStack(2)\n b1 = c.getBuffer(0)\n b2 = c.getBuffer(1)\n b3 = c.getBuffer(2)\n for i in [w1, w2]: #12\n lc = c.getLeftChild(i,1) # 1 st left child of the word on the stack.\n rc = c.getRightChild(i,1) # 1 st right child of the word on the stack.\n lc_rc_list.append(lc)\n lc_rc_list.append(rc)\n lc_rc_list.append(c.getLeftChild(i,2)) # 2 nd left child of the word on the stack\n lc_rc_list.append(c.getRightChild(i,2)) # 2 nd right child of the word on the stack\n lc_rc_list.append(c.getLeftChild(lc,1)) # 1 st left child of the left child of the word on the stack\n lc_rc_list.append(c.getRightChild(rc,1)) # 1 st right child of the right child of the word on the stack\n ########################### 18 Word Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getWordID(c.getWord(i))) # 6 words of the stack and buffer\n\n for i in lc_rc_list: #12 words of the tree\n feature_list.append(getWordID(c.getWord(i)))\n\n ########################### 18 Tag Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getPosID(c.getPOS(i))) # 6 tags of the owrds on the stack and the buffer\n\n for i in lc_rc_list:\n feature_list.append(getPosID(c.getPOS(i))) #12 tags of the words onthe stack and the buffer.\n ########################### 12 label Features ###########################\n for i in lc_rc_list:\n feature_list.append(getLabelID(c.getLabel(i))) #12 labels of the words on the stack and the buffer.\n\n\n return feature_list", "def featurize(self, tokens):\n features = []\n \n nrc_hashtag_emotion_features = self.nrc_hashtag_emotion(tokens)\n nrc_affect_intensity_features = self.nrc_affect_intensity(tokens)\n nrc_hashtag_sentiment_lexicon_unigrams_features = self.nrc_hashtag_sentiment_lexicon_unigrams(tokens)\n nrc_hashtag_sentiment_lexicon_bigrams_features = self.nrc_hashtag_sentiment_lexicon_bigrams(tokens)\n sentiment140_unigrams_features = self.sentiment140_unigrams(tokens)\n sentiment140_bigrams_features = self.sentiment140_bigrams(tokens)\n senti_wordnet_features = self.senti_wordnet(tokens)\n bing_lui_sentiment_lexicons_features = self.bing_lui_sentiment_lexicons(tokens)\n nrc_expanded_lexicon_features = self.nrc_10_expanded(tokens)\n negating_word_list_features = self.negating_words_list(tokens)\n 
total_number_of_words_features = self.get_total_number_of_words(tokens)\n mpqa_subjectivity_lexicon_features = self.mpqa_subjectivity_lexicon(tokens)\n afinn_sentiment_features = self.afinn_sentiment_scores(tokens)\n # senti_strength_features = self.get_sentistrength(\" \".join(tokens))\n\n features.extend(nrc_hashtag_emotion_features.values()) # 10 features\n features.extend(nrc_affect_intensity_features.values()) # 10 features\n features.extend(nrc_hashtag_sentiment_lexicon_unigrams_features.values()) # 4 features\n features.extend(nrc_hashtag_sentiment_lexicon_bigrams_features.values()) # 4 features\n features.extend(sentiment140_unigrams_features.values()) # 4 features \n features.extend(sentiment140_bigrams_features.values()) # 4 features\n features.extend(senti_wordnet_features.values()) # 4 features\n features.extend(bing_lui_sentiment_lexicons_features.values()) # 2 features\n features.extend(nrc_expanded_lexicon_features.values()) # 10 features\n features.extend(negating_word_list_features.values()) # 1 feature\n features.extend(total_number_of_words_features.values()) # 1 feature\n features.extend(mpqa_subjectivity_lexicon_features.values()) # 2 features\n features.extend(afinn_sentiment_features.values()) # 2 features\n # features.extend(senti_strength_features.values()) # 2 features\n\n return features", "def _extract_features(images,\n model_options,\n weight_decay=0.0001,\n reuse=tf.AUTO_REUSE,\n is_training=False,\n fine_tune_batch_norm=False):\n # feature extractor is a backbone factory\n DEBUG_VARS.raw_image = images\n features, end_points = feature_extractor.extract_features(\n images,\n output_stride=model_options.output_stride,\n multi_grid=model_options.multi_grid,\n model_variant=model_options.model_variant,\n weight_decay=weight_decay,\n reuse=reuse,\n is_training=is_training,\n fine_tune_batch_norm=fine_tune_batch_norm)\n\n # TODO:check\n # DEBUG_VARS.xception_feature = end_points['xception_65/entry_flow/conv1_1/Relu:0']\n DEBUG_VARS.xception_feature = features\n if not model_options.aspp_with_batch_norm:\n return features, end_points\n else:\n batch_norm_params = {\n 'is_training': is_training and fine_tune_batch_norm,\n 'decay': 0.9997,\n 'eps': 1e-5,\n 'affine': True,\n }\n regularize_func = regularizer('l2', weight_decay)\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n with arg_scope([sep_conv2d], activate=tf.nn.relu, activate_middle=tf.nn.relu, batch_norm=True,\n depthwise_weight_reg=None, pointwise_weight_reg=regularize_func,\n padding='SAME', strides=[1, 1]):\n with arg_scope([conv2d], activate=tf.nn.relu, weight_reg=regularize_func,\n batch_norm=True, padding='SAME', strides=[1, 1]):\n # TODO: ASPP IS IMPLEMENTED HERE! Check Out!\n with arg_scope([batch_norm2d], **batch_norm_params):\n depth = 256\n branch_logits = []\n\n # TODO: ADD IMAGE POOLING HERE\n if model_options.add_image_level_feature:\n # this crop size has been updated to the new scaled one outside, which is the exact size\n # of this model's inputs\n pool_height = scale_dimension(model_options.crop_size[0],\n 1. / model_options.output_stride)\n pool_width = scale_dimension(model_options.crop_size[1],\n 1. 
/ model_options.output_stride)\n # global average pooling, check whether the shape here is 1?\n image_feature = avg_pool2d(\n features, [pool_height, pool_width], [pool_height, pool_width],\n padding='VALID')\n # collapse channels to depth after GAP\n image_feature = conv2d(\n inputs=image_feature, outc=depth, ksize=[1, 1], name=_IMAGE_POOLING_SCOPE)\n # TODO:check\n DEBUG_VARS.image_feature = image_feature\n # reshape it to final feature map shape\n image_feature = tf.image.resize_bilinear(\n image_feature, [pool_height, pool_width], align_corners=True)\n image_feature.set_shape([None, pool_height, pool_width, depth])\n # add image level feature to branch_logits\n branch_logits.append(image_feature)\n\n # Employ a 1x1 convolution.\n branch_logits.append(conv2d(features, outc=depth, ksize=[1, 1], name=_ASPP_SCOPE + str(0)))\n\n if model_options.atrous_rates:\n # Employ 3x3 convolutions with different atrous rates.\n DEBUG_VARS.aspp_features = []\n for i, rate in enumerate(model_options.atrous_rates, 1):\n scope = _ASPP_SCOPE + str(i)\n if model_options.aspp_with_separable_conv:\n aspp_features = sep_conv2d(\n features, outc=depth, ksize=[3, 3], ratios=[rate, rate], name=scope)\n DEBUG_VARS.aspp_features.append(aspp_features)\n else:\n aspp_features = conv2d(\n features, outc=depth, ksize=[3, 3], ratios=[rate, rate], name=scope)\n branch_logits.append(aspp_features)\n\n # Merge branch logits.\n concat_logits = tf.concat(branch_logits, 3)\n concat_logits = conv2d(inputs=concat_logits, outc=depth, ksize=[1, 1],\n name=_CONCAT_PROJECTION_SCOPE)\n DEBUG_VARS.aspp_concat_feature = concat_logits\n concat_logits = drop_out(concat_logits, kp_prob=0.9, is_training=is_training,\n name=_CONCAT_PROJECTION_SCOPE + '_dropout')\n\n return concat_logits, end_points", "def _extract_features(self, graphs, ai2d_ann, image, layers):\n # To begin with, build the grouping graph, which is provides the layout\n # information on all diagram elements, which can be then picked out in\n # other graphs, if necessary.\n graph = graphs['grouping']\n\n # Check that a graph exists\n try:\n\n # Fetch nodes from the graph\n nodes = graph.nodes(data=True)\n\n except AttributeError:\n\n return None\n\n # Begin extracting the features by getting the diagram image shape\n h, w = image.shape[:2]\n\n # Get the number of pixels in the image\n n_pix = h * w\n\n # Set up a placeholder dictionaries to hold updated node and edge\n # features\n node_features = {}\n edge_features = {}\n\n # Loop over the nodes and their features\n for node, features in nodes:\n\n # Fetch the node type from its features under the key 'kind'\n node_type = features['kind']\n\n # Parse layout annotation\n layout_feats = self._parse_ai2d_layout(ai2d_ann, # annotation\n h, # image height\n w, # image width\n n_pix, # n of pixels\n node_type, # elem type\n node # node id\n )\n\n # Add layout features to the dictionary of updated node features\n node_features[node] = {'features': layout_feats,\n 'kind': self.node_dict['grouping'][node_type]}\n\n # Updated node attributes in the grouping graph using layout\n # features\n nx.set_node_attributes(graph, node_features)\n\n # Calculate features for grouping nodes based on their children. 
This\n # requires a directed tree graph.\n group_tree = nx.dfs_tree(graph, source=\"I0\")\n\n # Get a list of grouping nodes and image constants in the graph\n groups = [n for n, attr in graph.nodes(data=True) if attr['kind']\n in [self.node_dict['grouping']['imageConsts'],\n self.node_dict['grouping']['group']]]\n\n # Iterate over the nodes in the graph\n for n, attr in graph.nodes(data=True):\n\n # Check if the node type is a group\n if n in groups:\n\n # Get predecessors of the grouping node\n n_preds = nx.dfs_predecessors(group_tree, n)\n\n # Remove groups from the list of predecessor;\n # each group will be processed indepedently\n n_preds = [n for n in n_preds.keys() if n not in groups]\n\n # Create a subgraph consisting of preceding nodes\n n_subgraph = graph.subgraph(n_preds)\n\n # Get layout features for each node\n n_feats = [ad['features'] for n, ad in\n n_subgraph.nodes(data=True)]\n\n # Cast stacked features into a 2D numpy array\n stacked_feats = np.array(n_feats)\n\n # Get average centre point for group by slicing the array\n x_avg = np.average(stacked_feats[:, 0])\n y_avg = np.average(stacked_feats[:, 1])\n\n # Add up their area\n a_sum = np.sum(stacked_feats[:, 2])\n\n # Average the solidity\n s_avg = np.average(stacked_feats[:, 3])\n\n # Concatenate the features\n layout_feats = np.concatenate([[x_avg], [y_avg],\n [a_sum], [s_avg]], axis=0)\n\n # Update group feature dictionary\n upd_group_feats = {n: {'features': layout_feats,\n 'kind': attr['kind']}}\n\n # Update group features\n nx.set_node_attributes(graph, upd_group_feats)\n\n # Add edge types to the grouping layer, as these are not defined in the\n # JSON annotation. To do so, get the edges from the grouping graph.\n edges = graph.edges(data=True)\n\n # Loop over the edges in the graph\n for src, dst, features in edges:\n\n # Add edge type unde key 'kind' to the edge_features dictionary\n edge_features[src, dst] = {'kind': 'grouping'}\n\n # Update edge features in the grouping graph\n nx.set_edge_attributes(graph, edge_features)\n\n # Encode edge features\n self._encode_edges(graph, self.edge_dict['grouping'])\n\n # Update the grouping graph in the graphs dictionary\n graphs['grouping'] = graph\n\n # Now that the grouping layer has been created, check which other\n # annotation layers must be included in the graph-based representation.\n\n # The combination of grouping and connectivity layers is a relatively\n # simple case.\n if layers == \"grouping+connectivity\":\n\n # If a connectivity graph exists, merge it with the grouping graph\n if graphs['connectivity'] is not None:\n\n # Use nx.compose() to combine the grouping and connectivity\n # graphs\n graph = nx.compose(graphs['connectivity'], graphs['grouping'])\n\n # Encode edge type information using numerical labels\n self._encode_edges(graph, self.edge_dict['connectivity'])\n\n # Update the grouping graph\n graphs['grouping'] = graph\n\n # The connectivity layer alone is a bit more complex, as the children of\n # grouping nodes need to be copied over to the connectivity graph.\n if layers == 'connectivity' and graphs['connectivity'] is not None:\n\n # Get the grouping and connectivity graphs\n conn_graph = graphs['connectivity']\n group_graph = graphs['grouping']\n\n # Get a list of nodes in the connectivity graph\n conn_nodes = list(conn_graph.nodes(data=True))\n\n # Get a list of grouping nodes in the connectivity graph\n grouping_nodes = [n for n, attr_dict in conn_nodes\n if attr_dict['kind'] == 'group']\n\n # If grouping nodes are found, get their children 
and add them to\n # the graph\n if len(grouping_nodes) > 0:\n\n # Create a directed tree graph using depth-first search,\n # starting from the image constant I0.\n group_tree = nx.dfs_tree(group_graph, source=\"I0\")\n\n # Loop over each grouping node\n for gn in grouping_nodes:\n\n # Resolve grouping nodes by adding their children to the\n # connectivity graph\n self._resolve_grouping_node(gn, group_tree,\n group_graph, conn_graph)\n\n # If the connectivity graph does not include grouping nodes, simply\n # copy the node features from the grouping graph.\n n_subgraph = group_graph.subgraph(conn_graph.nodes)\n\n # Add these nodes to the connectivity graph\n conn_graph.add_nodes_from(n_subgraph.nodes(data=True))\n\n # Encode edge type information using numerical labels\n self._encode_edges(conn_graph, self.edge_dict['connectivity'])\n\n # Update the connectivity graph in the graphs dictionary\n graphs['connectivity'] = conn_graph\n\n # Start building the discourse graph by getting node features from the\n # grouping graph.\n if layers == 'discourse':\n\n # Get grouping and discourse graphs\n group_graph = graphs['grouping']\n rst_graph = graphs['discourse']\n\n # Reverse node type dictionary for the grouping layer\n rev_group_dict = {int(v.item()): k for k, v in\n self.node_dict['grouping'].items()}\n\n # Re-encode node types to ensure that node types do not clash with\n # those defined for discourse graph\n upd_node_types = {k: rev_group_dict[int(v['kind'].item())]\n for k, v in group_graph.nodes(data=True)}\n\n # Update node attributes for the grouping graph\n nx.set_node_attributes(group_graph, upd_node_types, 'kind')\n\n # Get the nodes participating in the discourse graph from the\n # grouping graph using the .subgraph() method.\n subgraph = group_graph.subgraph(rst_graph.nodes)\n\n # Add these nodes back to the discourse graph with their features\n # and numerical labels. These will overwrite the original nodes.\n rst_graph.add_nodes_from(subgraph.nodes(data=True))\n\n # Check if discourse graph contains groups or split nodes. 
Split\n # nodes are used to preserve the tree structure in case a diagram\n # element participates in multiple RST relations.\n for n, attr_dict in rst_graph.copy().nodes(data=True):\n\n # Check if the node is a group\n if 'group' in attr_dict['kind']:\n\n # Create a directed tree graph using depth-first search,\n # starting from the image constant I0.\n group_tree = nx.dfs_tree(group_graph, source=\"I0\")\n\n # Resolve grouping nodes by adding their children to the\n # discourse graph.\n self._resolve_grouping_node(n, group_tree,\n group_graph, rst_graph)\n\n # Check node for the copy_of attribute, which contains a\n # reference to the node which has been split.\n if 'copy_of' in attr_dict.keys():\n\n # Get the identifier of the node in AI2D layout annotation\n n_orig_id = attr_dict['copy_of']\n n_orig_kind = attr_dict['kind']\n\n # Fetch node data from the AI2D layout annotation\n layout_feats = self._parse_ai2d_layout(ai2d_ann,\n h,\n w,\n n_pix,\n n_orig_kind,\n n_orig_id)\n\n # Add updated features to a dictionary\n upd_node_feats = {n: {'features': layout_feats,\n 'kind': n_orig_kind}}\n\n # Update node features in the graph\n nx.set_node_attributes(rst_graph, upd_node_feats)\n\n # Check if the node is a relation\n if 'relation' in attr_dict['kind']:\n\n # Get integer label for RST relation\n rst_int_label = self.node_dict['relations'][attr_dict['rel_name']]\n\n # Get node labels and encode using label binarizer\n rst_label = self._rst_binarizer.transform(rst_int_label)\n\n # Check if label smoothing is requested:\n if self._smooth_labels:\n\n # Cast into float for label smoothing\n rst_label = np.asarray(rst_label, dtype=np.float64)\n\n # Smooth the labels by a factor of 0.1\n rst_label *= (1 - 0.1)\n rst_label += (0.1 / rst_label.shape[1])\n\n # Store encoded information into the updated features dict\n upd_node_feats = {n: {'features': rst_label.flatten()}}\n\n # Set the updated features to nodes in the discourse graph\n nx.set_node_attributes(rst_graph, upd_node_feats)\n\n # Check if a NetworkX graph should be returned\n if self._return_nx:\n\n return rst_graph\n\n # Convert node identifiers to integers. 
This needs to be performed\n # before creating a heterograph.\n rst_graph = nx.convert_node_labels_to_integers(rst_graph,\n first_label=0)\n\n # Get nodes and convert to NumPy array; get unique nodes; get node\n # type index vector\n nodes = np.asarray([attr['kind'] for n, attr in\n rst_graph.nodes(data=True)]).flatten()\n\n ntypes = np.unique(nodes)\n\n node_ixs = np.array([np.where(ntypes == n) for n in\n np.nditer(nodes)], dtype=np.int64).flatten()\n\n # Do the same for edges\n edges = np.asarray([attr['kind'] for s, t, attr in\n rst_graph.edges(data=True)]).flatten()\n\n etypes = np.unique(edges)\n\n edge_ixs = np.array([np.where(etypes == e) for e in\n np.nditer(edges)], dtype=np.int64).flatten()\n\n # Create DGL graph object from the discourse graph\n g = dgl.from_networkx(rst_graph)\n\n # Assign node and edge types\n g.ndata[dgl.NTYPE] = torch.LongTensor(node_ixs)\n g.edata[dgl.ETYPE] = torch.LongTensor(edge_ixs)\n\n # Create a DGL heterograph from the DGL graph object\n hg = dgl.to_heterogeneous(g, ntypes, etypes)\n\n # Loop over node types in the heterograph\n for ntype in hg.ntypes:\n\n # Get unique node identifiers for this node type; cast to list\n rst_node_ids = hg.nodes[ntype].data[dgl.NID].tolist()\n\n # Loop over RST node identifiers\n features = np.vstack([rst_graph.nodes[node_id]['features']\n for node_id in rst_node_ids])\n\n # Add features to DGL heterograph\n hg.nodes[ntype].data['features'] = torch.from_numpy(features)\n\n # Update the RST graph\n graphs['discourse'] = hg\n\n # Return all graphs\n return graphs", "def _extract_features(self, all_batches, patch_size, train=True):\n # manually derive basic intensities features\n # takes 20 sec / 1048 images batch on my laptop in 4 cores //\n p = patch_size\n r = 512 // p\n labels = np.empty(0)\n feats = np.empty(0)\n for counter, tmp in enumerate(all_batches):\n # if counter == 2:\n # break\n if train:\n batch_img, batch_label = tmp\n else:\n batch_img = tmp\n batch_label = np.empty(0)\n # just for testing just use 20 batch as training set\n print('processing batch {}'.format(counter))\n t1 = time.time()\n batch_feats = np.asarray(\n parmap.map(\n self._get_features_from_batch_images,\n batch_img,\n r,\n p,\n pm_pbar=True))\n print(time.time() - t1)\n labels = np.concatenate(\n (labels, batch_label)) if labels.size else batch_label\n feats = np.concatenate(\n (feats, batch_feats)) if feats.size else batch_feats\n if train:\n return feats, labels\n else:\n return feats", "def stylize(network, initial, content, styles, iterations,\n content_weight, style_weight, style_blend_weights, tv_weight,\n learning_rate, print_iterations=None, checkpoint_iterations=None):\n shape = (1,) + content.shape\n style_shapes = [(1,) + style.shape for style in styles]\n content_features = {}\n style_features = [{} for _ in styles]\n\n # compute content features in feedforward mode\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=shape)\n net, mean_pixel = vgg.net(network, image)\n content_pre = np.array([vgg.preprocess(content, mean_pixel)])\n content_features[CONTENT_LAYER] = net[CONTENT_LAYER].eval(\n feed_dict={image: content_pre})\n\n # compute style features in feedforward mode\n for i in range(len(styles)):\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=style_shapes[i])\n net, _ = vgg.net(network, image)\n style_pre = np.array([vgg.preprocess(styles[i], mean_pixel)])\n for layer in 
STYLE_LAYERS:\n features = net[layer].eval(feed_dict={image: style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n style_features[i][layer] = gram\n\n # make stylized image using backpropogation\n with tf.Graph().as_default():\n if initial is None:\n noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)\n initial = tf.random_normal(shape) * 0.256\n else:\n initial = np.array([vgg.preprocess(initial, mean_pixel)])\n initial = initial.astype('float32')\n image = tf.Variable(initial)\n net, _ = vgg.net(network, image)\n\n # content loss\n content_loss = content_weight * (2 * tf.nn.l2_loss(\n net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) /\n content_features[CONTENT_LAYER].size)\n # style loss\n style_loss = 0\n for i in range(len(styles)):\n style_losses = []\n for style_layer in STYLE_LAYERS:\n layer = net[style_layer]\n _, height, width, number = map(lambda i: i.value, layer.get_shape())\n size = height * width * number\n feats = tf.reshape(layer, (-1, number))\n gram = tf.matmul(tf.transpose(feats), feats) / size\n style_gram = style_features[i][style_layer]\n style_losses.append(2 * tf.nn.l2_loss(gram - style_gram) / style_gram.size)\n style_loss += style_weight * style_blend_weights[i] * reduce(tf.add, style_losses)\n # total variation denoising\n tv_y_size = _tensor_size(image[:,1:,:,:])\n tv_x_size = _tensor_size(image[:,:,1:,:])\n tv_loss = tv_weight * 2 * (\n (tf.nn.l2_loss(image[:,1:,:,:] - image[:,:shape[1]-1,:,:]) /\n tv_y_size) +\n (tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:shape[2]-1,:]) /\n tv_x_size))\n # overall loss\n loss = content_loss + style_loss + tv_loss\n\n # optimizer setup\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n def print_progress(i, last=False):\n stderr.write('Iteration %d/%d\\n' % (i + 1, iterations))\n if last or (print_iterations and i % print_iterations == 0):\n stderr.write(' content loss: %g\\n' % content_loss.eval())\n stderr.write(' style loss: %g\\n' % style_loss.eval())\n stderr.write(' tv loss: %g\\n' % tv_loss.eval())\n stderr.write(' total loss: %g\\n' % loss.eval())\n\n # optimization\n best_loss = float('inf')\n best = None\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for i in range(iterations):\n last_step = (i == iterations - 1)\n print_progress(i, last=last_step)\n train_step.run()\n\n if (checkpoint_iterations and i % checkpoint_iterations == 0) or last_step:\n this_loss = loss.eval()\n if this_loss < best_loss:\n best_loss = this_loss\n best = image.eval()\n yield (\n (None if last_step else i),\n vgg.unprocess(best.reshape(shape[1:]), mean_pixel)\n )", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender 
value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9,\n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in imgs:\n file_features = []\n # Read in each image, one by one\n image = mpimg.imread(file)\n # Apply colour conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2HSV))\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2LUV))\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2HLS))\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor((image, cv2.COLOR_RGB2YUV))\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)\n # print('converting to YCrCb')\n else:\n feature_image = np.copy(image)\n\n # Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n file_features.append(spatial_features)\n if hist_feat == True:\n # Apply color_hist()\n hist_features = 
color_hist(feature_image, nbins=hist_bins)\n file_features.append(hist_features)\n if hog_feat == True:\n # Call get_hog_features() with vis=False, feature_vec=True\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:, :, channel],\n orient, pix_per_cell, cell_per_block,\n vis=False, feature_vec=True))\n # hog_features = np.concatenate(hog_features)\n hog_features = np.ravel(hog_features)\n else:\n hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,\n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n # Append the new feature vector to the features list\n file_features.append(hog_features)\n features.append(np.concatenate(file_features))\n\n # Return list of feature vectors\n return features", "def get_style_features(text, nlp):\n doc = nlp(text)\n \n final_data = {f'mpqa_{k}': v for k, v in doc._.total_argument_types.items()}\n final_data['tb_sentiment'] = doc.sentiment\n final_data['tb_subjectivity'] = doc._.subjectivity\n \n # Return avg for emotions\n emotion_data = doc._.emotions\n emotion_data = {k: v / len(doc) for k, v in emotion_data.items()}\n \n final_data.update(emotion_data)\n \n cur_lemmas = list(set(w.lemma_ for w in doc))\n final_data['lemmas'] = cur_lemmas\n \n return final_data", "def training_features(orientation=8, pix_per_cell=8, cell_per_block=2,\n spatial_size=16, hist_bins=32, color_space='HLS', sample_window=64,\n channels=[0], debug=False):\n def extract(paths, augment=False): # extract and augment\n features = []\n for file in paths:\n image = utils.imread_scaled_unified(file)\n if color_space != ident_config['default_color_space']:\n image_color_converted = cv2.cvtColor(\n image,\n eval('cv2.COLOR_' + ident_config['default_color_space'] + '2' + color_space))\n else:\n image_color_converted = image\n # End of if color_space\n\n image_resized = cv2.resize(image_color_converted, (sample_window, sample_window))\n if augment:\n brightened = utils.brighten(image_resized, bright=1.2)\n flipped = cv2.flip(utils.brighten(image_resized, bright=1.1), 1) # horizontal flip\n to_process = [brightened, flipped]\n else:\n to_process = [image_resized]\n # End of if augment\n\n for x in to_process: # must use square bracket for single element in list to iterate\n # using tuple, it will iterate the single image's row dimension. \n hog_features = utils.get_hog_features_channels(\n x, orientation, pix_per_cell, cell_per_block, channels)\n spatial_features, hist_features = utils.color_features(\n x, spatial_size=spatial_size, hist_bins=hist_bins, channels=channels)\n image_features = np.hstack(\n (spatial_features, hist_features, hog_features)).reshape(1, -1)\n image_features = np.squeeze(image_features)\n # remove the redundant dimension, StandardScaler does not like it\n features.append(image_features)\n # End of for x ...\n # End of for file\n return features\n cars, noncars, cars_to_be_augmented, num_cars, num_noncars = samples_sorted()\n num_samples = 30000 # limit the number of samples to be selected from each group.\n print('num_cars: ', num_cars, ' num_noncars: ', num_noncars, ' max. 
samples: ', 3*num_samples)\n\n car_features = extract(cars[:min(num_samples, len(cars))], augment=False)\n car_augmented_features = extract(cars_to_be_augmented[:min(num_samples, len(cars_to_be_augmented))], augment=True)\n noncar_features = extract(noncars[:min(num_samples, len(noncars))], augment=False)\n\n # Create an array stack of feature vectors\n X = np.vstack((car_features, car_augmented_features, noncar_features)).astype(np.float64)\n # Fit a per-column scaler\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n del X # X, scaled_X consumes much memory, should be released ASAP.\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features) + len(car_augmented_features)), np.zeros(len(noncar_features))))\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.1, random_state=rand_state)\n return X_train, X_test, y_train, y_test, X_scaler", "def extract_feature(network_proto_path,\n network_model_path,\n image_list, data_mean, layer_name, image_as_grey = False):\n net = caffe.Net(network_proto_path, network_model_path, caffe.TEST)\n transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n transformer.set_input_scale('data', 1)\n transformer.set_transpose('data', (2, 0, 1))\n blobs = OrderedDict([(k, v.data) for k, v in net.blobs.items()])\n\n shp = blobs[layer_name].shape\n print blobs['data'].shape\n\n batch_size = blobs['data'].shape[0]\n print blobs[layer_name].shape\n\n features_shape = (len(image_list), shp[1])\n features = np.empty(features_shape, dtype='float32', order='C')\n for idx, path in zip(range(features_shape[0]), image_list):\n img = caffe.io.load_image(path, color=False)\n prob = net.forward_all(data=np.asarray([transformer.preprocess('data', img)]))\n print np.shape(prob['prob'])\n blobs = OrderedDict([(k, v.data) for k, v in net.blobs.items()])\n features[idx, :] = blobs[layer_name][0, :].copy()\n print '%d images processed' % (idx + 1)\n features = np.asarray(features, dtype='float32')\n return features", "def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass", "def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g = img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE 
<<< \n \n return features", "def output_rule_feature_matrices():\n with open(config.data_path + config.sentiment_seed, 'rb') as input_file:\n sentiment_dict = pickle.load(input_file)\n seed_sentiments = set(sentiment_dict.keys())\n \n for i in range(len(config.file_names)):\n if i is 5:\n print('processing ', config.file_names[i])\n fname = config.file_names[i]\n feature_x, feature_y, opinion_x, opinion_y = text_to_matrix(\n fname, seed_sentiments)\n feature_x = np.transpose(feature_x)\n opinion_x = np.transpose(opinion_x)\n with open('../results/' + fname + '_rule_feature_matrix.pickle', 'wb') as f:\n pickle.dump(feature_x, f)\n with open('../results/' + fname + '_rule_opinion_matrix.pickle', 'wb') as f:\n pickle.dump(opinion_x, f)\n\n with open('../results/' + fname + '_feature_label.pickle', 'wb') as f:\n pickle.dump(feature_y.ravel(), f)\n with open('../results/' + fname + '_opinion_label.pickle', 'wb') as f:\n pickle.dump(opinion_y.ravel(), f)", "def feature_extract(self, CT_pairs):\n instances = []\n for pair in CT_pairs:\n config = pair[0]\n label = pair[1]\n data = []\n featureset = {}\n \n # for nltk NaiveBayes feature selection stuff when doing MaxEnt decoding parser commit this\n# featureset[\"topOfBuffer\"] = self.token_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.token_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = (self.token_dict[config.sigma.top()], self.token_dict[config.beta.top()])\n# featureset[\"topOfBuffer\"] = self.POS_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.POS_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = tuple((self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]))\n \n # add the (StackTopPOS,BufferTopPOS,bufferchildren_POS) feature\n #value_set = tuple([self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]] + [self.POS_dict[child] for child in self.getBufferChildren(config.beta.top())])\n #featureset[\"bufferStackbufferChildrenPair\"] = value_set\n \n # for MaxEnt decoding stuff\n # token variants\n data.append((\"topOfBuffer\",self.token_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.token_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.token_dict[config.sigma.top()],self.token_dict[config.beta.top()]))\n #POS variants\n data.append((\"topOfBuffer\",self.POS_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.POS_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.POS_dict[config.sigma.top()],self.POS_dict[config.beta.top()]))\n ins = Instance(label=label, data=data)\n #ins = Instance(label=label, data=featureset)\n instances.append(ins)\n \n return instances", "def extract_features(self, preprocessed_inputs):\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n feature_map_layout = {\n 'from_layer': ['conv4', '', '', '', '', '', ''],\n 'layer_depth': [-1, 1024, 1024, 512, 256, 256, 256],\n }\n\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('vgg_16',\n reuse=self._reuse_weights) as scope:\n net, image_features = vgg.vgg_16_base(\n preprocessed_inputs,\n final_endpoint='pool5',\n trainable=False,\n scope=scope)\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n 
feature_map_layout=feature_map_layout,\n depth_multiplier=self._depth_multiplier,\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features)\n\n return feature_maps.values()", "def extractFeatures(image, mask, name, binCount=8, features=\"all\"):\n def extractType(func, type_name):\n name = []\n values = []\n feat = func(image,mask, binCount=binCount)\n feat.enableAllFeatures() \n feat.execute()\n for (key,val) in six.iteritems(feat.featureValues):\n name.append(key+f'_{type_name}')\n values.append(val)\n return pd.DataFrame([values], columns=name)\n\n dim = image.GetDimension()\n\n features_array = np.array([\"FO\", f\"S{dim}D\", \"GLCM\", \"GLSZM\", \"GLRLM\", \"NGTDM\", \"GLDM\"])\n features_func = np.array([firstorder.RadiomicsFirstOrder, eval(f\"shape{'2D'*(dim == 2)}.RadiomicsShape{'2D'*(dim==2)}\"), \n glcm.RadiomicsGLCM, glszm.RadiomicsGLSZM, glrlm.RadiomicsGLRLM, ngtdm.RadiomicsNGTDM, \n gldm.RadiomicsGLDM])\n if features != \"all\":\n if features is str:\n print(\"Type wrong. Returning None.\")\n return None\n index = pd.Index(features_array).isin(features)\n features_array = features_array[index]\n features_func = features_func[index]\n\n list_feat = list(map(lambda i: extractType(features_func[i], features_array[i]), np.arange(len(features_array))))\n df = pd.concat([pd.DataFrame([name], columns=[\"Caso\"])] + list_feat, axis=1)\n return df", "def preprocess(args, g, features):\n # g = dgl.to_homogeneous(g)\n with torch.no_grad():\n g.edata[\"weight\"] = calc_weight(g)\n g.ndata[\"feat_0\"] = features\n for hop in range(1, args['n_hops'] + 1):\n g.update_all(fn.u_mul_e(f\"feat_{hop - 1}\", \"weight\", \"msg\"),\n fn.sum(\"msg\", f\"feat_{hop}\"))\n hop_feat_list = []\n for hop in range(args['n_hops'] + 1):\n hop_feat_list.append(g.ndata.pop(f\"feat_{hop}\"))\n return hop_feat_list", "def _precompute_image_features(img, layers, shape, save_dir):\n # type: (np.ndarray, Union[Tuple[str], List[str]], Union[Tuple[int], List[int]]) -> Dict[str, np.ndarray]\n features_dict = {}\n g = tf.Graph()\n # Choose to use cpu here because we only need to compute this once and using cpu would provide us more memory\n # than the gpu and therefore allow us to process larger style images using the extra memory. 
This will not have\n # an effect on the training speed later since the gram matrix size is not related to the size of the image.\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n with tf.variable_scope(\"discriminator\", reuse=False):\n image = tf.placeholder(tf.uint8, shape=shape)\n image_float = tf.image.convert_image_dtype(image,dtype=tf.float32) * 2 - 1\n net = vgg.net(image_float, trainable=False)\n style_pre = np.array([img])\n style_pre = style_pre.astype(np.uint8)\n\n if '0.12.0' in tf.__version__:\n all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n else:\n all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)\n\n discrim_tvars = [var for var in all_vars if var.name.startswith(\"discriminator\")]\n saver = tf.train.Saver(discrim_tvars)\n\n ckpt = tf.train.get_checkpoint_state(save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n raise AssertionError(\"Cannot load from save directory.\")\n\n var_not_saved = [item for item in all_vars if item not in discrim_tvars]\n sess.run(tf.initialize_variables(var_not_saved))\n\n\n for layer in layers:\n # Calculate and store gramian.\n features = net[layer].eval(feed_dict={image: style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n features_dict[layer] = gram\n return features_dict", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def get_features_image(image, hyperparams):\n # Get all relevant hyperparameters\n color_space = hyperparams['COLOR_SPACE']\n spatial_feat = hyperparams['SPATIAL_FEAT']\n spatial_size = hyperparams['SPATIAL_SIZE']\n hist_feat = hyperparams['HIST_FEAT']\n hist_bin = hyperparams['HIST_BIN']\n hist_range = hyperparams['HIST_RANGE']\n hog_orient = hyperparams['HOG_ORIENT']\n hog_cell_per_block = hyperparams['HOG_CELL_PER_BLOCK']\n hog_pix_per_cell = hyperparams['HOG_PIX_PER_CELL']\n hog_feat = hyperparams['HOG_FEAT']\n hog_channel = hyperparams['HOG_CHANNEL']\n hog_sqrt = hyperparams['HOG_SQRT']\n # Define an empty list to receive features\n img_features = []\n # apply color conversion if other than 'RGB'\n feature_image = convert_color(image, color_space)\n # Compute spatial features if flag is set\n if spatial_feat:\n spatial_features = get_spatial_features(feature_image, size=spatial_size)\n img_features.append(spatial_features)\n # Compute histogram features if flag is set\n if hist_feat:\n _, _, _, _, hist_features = get_hist_features(feature_image, nbins=hist_bin, bins_range=hist_range)\n img_features.append(hist_features)\n # Compute HOG features if flag is set\n if hog_feat:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.extend(get_hog_features(feature_image[:,:,channel], 
hog_orient, hog_pix_per_cell,\n hog_cell_per_block, vis=False, transform_sqrt=hog_sqrt, feature_vec=True))\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], hog_orient,\n hog_pix_per_cell, hog_cell_per_block, vis=False, transform_sqrt=hog_sqrt, feature_vec=True)\n img_features.append(hog_features)\n # Return concatenated array of features\n return np.concatenate(img_features)", "def features_from_CNN(self):\n\n dataloader = self.datasetManager.get_dataloader()\n print(\"\\nFeatures obtention with CNN\")\n print(\"-\"*15)\n for i, batch in tqdm.tqdm(enumerate(dataloader)):\n img = self.to_device(batch[0])\n img_name = batch[2][0]\n \n temp = re.findall(r'\\d+', img_name)\n res = list(map(int, temp))\n X = res[-2]\n Y = res[-1]\n \n savepath = os.path.join(self.output_dir, 'data%i'%X)\n create_folder(savepath)\n \n out_CNN = self.network(img) \n \n torch.save(out_CNN, os.path.join(savepath,'features_tensor%i.pt'%Y))", "def extract_features(docs_train, docs_test, perform_dimensionality_reduction):\n word_ngram_range = (1, 4)\n char_ngram_range = (2, 5)\n\n '''\n Build an n grams vectorizer with word_n_gram_range and char_n_gram_range\n '''\n\n ngrams_vectorizer = create_n_grams_vectorizer(\n word_ngram_range, char_ngram_range)\n\n # use the n_gram vectorizer to form the train and test dataset\n # it will take a lot of time... i think\n X_train = ngrams_vectorizer.fit_transform(docs_train)\n X_test = ngrams_vectorizer.transform(docs_test)\n print(\"Performed fitting of data\")\n\n ############ dimensionality reduction ################\n\n if(perform_dimensionality_reduction == True):\n X_train, X_test = perform_dimensionality_reduction(X_train, X_test)\n\n # print(docs_train[0])\n return X_train, X_test", "def extract_feat(self, imgs):\n pass", "def extract_feat(self, imgs):\n pass", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts 
= self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def extract_feature(c, pid, wid, extract_info_dict):\n\n image_feature_filter_names = set(\n extract_info_dict['image_feature_filter_name']\n )\n\n # Extract all images (different sites / fields of view)\n c.execute(\n \"\"\"\n SELECT TableNumber, ImageNumber\n FROM Image\n WHERE Image_Metadata_Plate = {} AND Image_Metadata_Well = '{}'\n \"\"\".format(pid, wid)\n )\n\n tid_iid_pairs = c.fetchall()\n\n # Track the feature names (row index)\n row_index = []\n features = []\n\n # Iterate through all sites and extract features from each site\n for p in tid_iid_pairs:\n tid, iid = p[0], p[1]\n\n # Extract image features\n c.execute(\n \"\"\"\n SELECT *\n FROM Image\n WHERE TableNumber = '{}' AND ImageNumber = {}\n \"\"\".format(tid, iid)\n )\n\n result = c.fetchall()\n result = np.array(result[0])\n\n # Filter out some features\n descriptions = [i[0] for i in c.description]\n droped_c = [i for i in range(len(descriptions)) if descriptions[i] in\n image_feature_filter_names]\n result = np.delete(result, droped_c, axis=0)\n\n # Change the data type of result into floats\n result = result.astype(float)\n\n image_feature = result\n image_name = [i for i in descriptions if i not in\n image_feature_filter_names]\n assert(image_name == extract_info_dict['image_name'])\n\n # Extract cell, cytoplasm, and nuclei features\n cell_feature = extract_cell_level_feature(\n c,\n 'Cells',\n tid,\n iid,\n set(extract_info_dict['cell_feature_filter_name']),\n extract_info_dict['cell_name']\n )\n\n cytoplasm_feature = extract_cell_level_feature(\n c,\n 'Cytoplasm',\n tid,\n iid,\n set(extract_info_dict['cytoplasm_feature_filter_name']),\n extract_info_dict['cytoplasm_name']\n )\n\n nuclei_feature = extract_cell_level_feature(\n c,\n 'Nuclei',\n tid,\n iid,\n set(extract_info_dict['nuclei_feature_filter_name']),\n extract_info_dict['nuclei_name']\n )\n\n # Combine image feature, cell level medians together\n cur_feature = np.hstack((image_feature,\n cell_feature,\n cytoplasm_feature,\n nuclei_feature))\n\n # Add the current feature into the well feature collections\n features.append(cur_feature)\n row_index.append('{}_{}_{}'.format(pid, wid, iid))\n\n features = np.vstack(features)\n return features, row_index", "def feature_extractor(inputs,\n n_nodes,\n batch_size):\n geometry_input = inputs[:, :, :3]\n morphology_input = inputs[:, :, 3:]\n\n # adjacency = \\\n # K.concatenate([K.zeros(shape=(batch_size, 1, n_nodes)),\n # morphology_input], axis=1) # add soam\n adjacency = morphology_input\n full_adjacency = \\\n batch_full_matrix(adjacency, n_nodes, batch_size)\n # geometry_input = K.concatenate([K.zeros(shape=(batch_size, 1, 3)),\n # geometry_input], axis=1)\n\n # distance = distance_from_parent(adjacency,\n # geometry_input,\n # n_nodes,\n # batch_size)\n\n # distance = 
locations_by_distance_from_parent(full_adjacency=full_adjacency,\n # distance_from_parent=geometry_input,\n # batch_size=batch_size)\n #\n\n filled_full_adjacency_x = \\\n full_adjacency*K.repeat_elements(K.expand_dims(geometry_input[:, :, 0], 2), n_nodes, axis=2)\n filled_full_adjacency_y = \\\n full_adjacency*K.repeat_elements(K.expand_dims(geometry_input[:, :, 1], 2), n_nodes, axis=2)\n filled_full_adjacency_z = \\\n full_adjacency*K.repeat_elements(K.expand_dims(geometry_input[:, :, 2], 2), n_nodes, axis=2)\n\n features = K.concatenate([adjacency,\n full_adjacency,\n geometry_input,\n filled_full_adjacency_x,\n filled_full_adjacency_y,\n filled_full_adjacency_z], axis=2)\n return features", "def extract_features(self, inputs):\n pass", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def __get_features(net, image_tensor):\n layers_idx = ['0', '5', '10', '19', '21', '28']\n features = {}\n for i, layer in enumerate(net.features):\n image_tensor = layer.forward(image_tensor)\n if str(i) in layers_idx:\n features[str(i)] = image_tensor\n return features", "def extract(self,image_path):#image_path\r\n\r\n img = caffe.io.load_image(image_path)\r\n \r\n #image1=cv2.imread(caffe_root + 'examples/images/cat.jpg') \r\n #img=cv2.cvtColor(image1,cv2.COLOR_BGR2RGB) \r\n #img=img/255. \r\n \r\n\r\n transformed_image = self.transformer.preprocess('data', img)\r\n self.net.blobs['data'].data[...] = transformed_image\r\n ft = self.net.forward()\r\n ft = np.squeeze(ft['pool5/7x7_s1'])\r\n ft = ft / LA.norm(ft)\r\n return ft", "def feature_extraction(images, save_to='dataset.csv'):\n num_images = len(images)\n logging.info(f\"Extracting features from {num_images} images...\")\n x = np.zeros((num_images, 7))\n y = np.zeros(num_images, dtype=np.int8)\n\n for i, image in enumerate(images):\n logging.info(f\"Processing Image {i+1}/{num_images}...\")\n y[i] = 0 if image.name.startswith('cyl') \\\n else 1 if image.name.startswith('inter') \\\n else 2 if image.name.startswith('let') \\\n else 3 if image.name.startswith('mod') \\\n else 4 if image.name.startswith('para') \\\n else 5 if image.name.startswith('super') \\\n else 6 if image.name.startswith('svar') else -1\n \n # Get number of object pixels in segmented color channels, which become features 0-3\n for color in [0,1,2,4]: # 3 is the color index for RGB so we skip that and use 4 (grayscale)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n if len(uniques) > 2:\n image = image.otsu(color)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n x[i,color if color is not 4 else 3] = counts[0]\n\n x[i,4] = np.std(image.getHistogram(4))\n\n x[i,5] = np.argmax(image.getHistogram(4))\n\n x[i,6] = np.argmin(image.getHistogram(4))\n\n # Save new dataset to file\n np.savetxt(save_to, np.concatenate([x,np.atleast_2d(y).T], axis=1), delimiter=',', fmt='%s')\n\n return x, y", "def calculate_gradients(image, style_targets, content_targets, \n style_weight, content_weight):\n\n ### START CODE HERE ###\n with tf.GradientTape() as tape:\n \n # get the style image features\n style_features = get_style_image_features(image)\n \n # get the content image features\n content_features = get_content_image_features(image)\n \n # get the style and content loss\n loss = get_style_content_loss(style_targets,style_features,content_targets,content_features,style_weight,content_weight)\n\n # calculate gradients of loss with respect to the image\n gradients = tape.gradient(loss, image)\n\n ### END CODE HERE ###\n\n return 
gradients", "def raw_features_extractor(database='./red_cod.db.pkl', sites=-1, elements = -1, maxatoms= -1,\r\n dictionary='diccionario', features='datosrahm.csv'):\r\n \r\n df=create_collection(database=database,sites=sites, elements=elements, maxatoms=maxatoms, \r\n dictionary=dictionary)\r\n \r\n start=time.time()\r\n \r\n datos=pd.read_csv(features)\r\n datos=datos.fillna(-1)\r\n\r\n dicc=dict(datos[['Symbol','Z']].values)\r\n\r\n dicc['D']=1\r\n dicc['Bk']=97\r\n dicc['Cf']=98\r\n dicc['Es']=99\r\n dicc['Fm']=100\r\n dicc['Md']=101\r\n dicc['No']=102\r\n dicc['Lr']=103\r\n \r\n max_sitios = max(df['sitios'].values)\r\n\r\n df=df[df['sitios'] <= max_sitios].reset_index(drop=True)\r\n \r\n X=np.zeros((len(df),max_sitios,104))\r\n y=np.zeros((len(df),1))\r\n mult=np.zeros((len(df),max_sitios))\r\n wyckmul=np.load('support/WyckoffSG_dict.npy').item()['wyckmul']\r\n \r\n for row in range(len(df)):\r\n \r\n item=df['WyckOcc'][row]\r\n sitios=list(item.values()) \r\n sitocc=np.zeros((len(sitios),104))\r\n spacegroup = str(df['sgnum'][row]).zfill(3)\r\n \r\n try:\r\n \r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n sitios] for i in j]\r\n \r\n except:\r\n print('There exists an error concerning with the space group of CIF ', df['cif'][row],'\\n')\r\n print('Please check in www.crystallography.net to provide the correct space group number of that CIF',\r\n '\\n','\\n')\r\n spacegroup=input('Give me the correct spacegroup:'+'\\n'+'\\n')\r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n list(df['WyckOcc'][row].values())] for i in j]\r\n \r\n occs=[]\r\n for i in range(len(sitios)):\r\n\r\n for j in list(sitios[i].values()):\r\n \r\n ocupacion=np.array(list(j.values()))\r\n llaves=[llave.replace('+','').replace('-','').replace('1',\r\n '').replace('2','').replace('3','').replace('4',\r\n '') for llave in np.array(list(j.keys()))]\r\n llaves=[llave.replace('.','') for llave in llaves]\r\n llaves=[llave.replace('5','').replace('6','').replace('7',\r\n '').replace('8','').replace('9','').replace('0',\r\n '') for llave in llaves]\r\n vector=np.zeros((1,104))\r\n occs=[sum(ocupacion)]+occs\r\n \r\n try:\r\n \r\n idx=[dicc[k] for k in llaves]\r\n \r\n except:\r\n \r\n print(' ELEMENTO NO IDENTIFICADO EN LA LISTA ',llaves,'\\n',\r\n 'REVISA EL SIGUIENTE CIF PARA HACER LA CORRECCION:','\\t',df['cif'][row])\r\n \r\n former = input('Elemento Incorrecto: ')\r\n current = input('Elemento Correcto: ')\r\n \r\n llaves=[current if x == former else x for x in llaves]\r\n idx=[dicc[k] for k in llaves]\r\n \r\n \r\n for k in idx:\r\n vector[0][k-1] = ocupacion[idx.index(k)]\r\n \r\n \r\n sitocc[i]=vector\r\n \r\n while sitocc.shape[0] != max_sitios:\r\n sitocc=np.concatenate((np.zeros((1,104)),sitocc))\r\n s=[0]+s\r\n \r\n X[row,:,:]=sitocc\r\n y[row]=df['target'][row]\r\n mult[row]=s\r\n \r\n S = np.expand_dims(mult,axis=2)\r\n features=datos.iloc[:,2:].values\r\n x=X[:,:,:96]\r\n \r\n fracsum = np.expand_dims(np.sum(x,axis=2), axis=2)\r\n \r\n x=np.dot(x,features) \r\n\r\n print('Atomic radii and electronegativities for each Wyckoff site extracted in',\r\n round(time.time()-start,2),' s') \r\n \r\n np.save('raw_features', x)\r\n np.save('output_values', y)\r\n np.save('multiplicities', S)\r\n np.save('occupation_fractions', fracsum)\r\n \r\n return x, y, S, fracsum, df", "def extract_features(self, preprocessed_inputs):\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n 
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n bottomup_features_names = ['Conv2d_11_pointwise', 'Conv2d_13_pointwise']\n num_appended_layers = 4\n appended_channel_num = [512, 256, 256, 256]\n\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('MobilenetV1',\n reuse=self._reuse_weights) as scope:\n _, image_features = mobilenet_v1.mobilenet_v1_base(\n preprocessed_inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n topdown_features = self._topdown_feature_maps(\n image_features,\n bottomup_features_names=bottomup_features_names,\n num_appended_layers=num_appended_layers,\n appended_channel_num=appended_channel_num)\n return topdown_features.values()", "def extract_features_from_imglist(image_path_list,\n color_space='RGB',\n spatial_size=(32, 32),\n hist_bins=32,\n orient=9,\n pix_per_cell=8,\n cell_per_block=2,\n hog_channel=0,\n use_spatial_feat=True,\n use_hist_feat=True,\n use_hog_feat=True,\n visualize=False):\n\n features = []\n\n for image_path in tqdm(image_path_list):\n # print(image_path)\n img = mpimg.imread(image_path)\n\n img_features = extract_features(img,\n color_space=color_space,\n spatial_size=spatial_size,\n hist_bins=hist_bins,\n orient=orient,\n pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel,\n use_spatial_feat=use_spatial_feat,\n use_hist_feat=use_hist_feat,\n use_hog_feat=use_hog_feat,\n visualize=visualize)\n\n features.append(img_features)\n\n return features", "def getfeaturesandlabels(lst, exptype=False, semantic=True, predict=True):\n if 'PGATE' in lst[0][0]:\n print \"Get features from {} expressions.\".format('predicted' if predict else 'gold')\n else:\n print \"Get features from gold expressions. 
(No PGATE in token)\"\n predict = False\n \n stats = {'holders_not_in_candidates': [],\n 'position': {},\n 'expt_not_in_candidates': []}\n if not exptype:\n exptypelist = EXPTYPES\n features = {}\n labels = {}\n pos = {}\n ev = evaluate()\n for expt in EXPTYPES:\n features[expt] = []\n labels[expt] = []\n pos[expt] = []\n features[expt+'implicit'] = []\n labels[expt+'implicit'] = []\n pos[expt+'implicit'] = []\n features[expt+'w'] = []\n labels[expt+'w'] = []\n pos[expt+'w'] = []\n for sent_i, sent in enumerate(lst):\n if DEBUG: print \"---\", sent_i\n if sent_i % 1000 == 0: print \"setning\", sent_i\n daughterlists_sent(sent)\n ex = getexpressions_sent(sent)\n pex = getexpressions_sent(sent, predict=predict)\n tagholdercandidates_sent(sent, predict=predict)\n candidates = getholdercandidates_list_sent(sent)\n holder_dct = getholders_sent_new(sent)\n holder_exp_pairs = getholder_exp_pairs_sent(sent, ex, holder_dct, test=predict)\n count_gold(holder_exp_pairs) \n if True: # syntactic_path\n paths = getpaths_sent(getgraph_sent(sent))\n else:\n paths = False\n if predict:\n\n holder_exp_pairs_sys = []\n\n for c, p in enumerate(extolst(pex, gatekey='PGATE')):\n # first located e' that corresponded to e\n argmaxcxe = 0 # at least some overlap\n if args.argmaxcxe:\n argmaxcxe = int(args.argmaxcxe)\n current_pair = None\n for exp_pair_i, exp_pair in enumerate(holder_exp_pairs):\n #argmax c(x,e) regardless of exp type j&m 7.1.1\n if DEBUG:\n print exp_pair\n cxe = ev.spancoverage(exp_pair[0], p['token_id']) \n if DEBUG:\n print cxe\n if cxe > argmaxcxe:\n argmaxcxe = cxe\n current_pair = exp_pair\n if current_pair:\n holder_exp_pairs_sys.append((p['token_id'], current_pair[1], current_pair[2], current_pair[3]))\n else:\n counters['falsely_detected_exp'] += 1\n counters['falsely_detected_exp' + p['expt']] += 1\n \n if predict:\n holder_exp_pairs_use = holder_exp_pairs_sys\n else:\n holder_exp_pairs_use = holder_exp_pairs\n holder_exp_pairs_use = count_sys(holder_exp_pairs_use, save=True)\n for exp_pair in holder_exp_pairs_use:\n expt = exp_pair[2]\n cand_exists = True\n holder_set = True\n # Categorise \n if isinstance(exp_pair[1], str):\n #if predict:\n holder_set = False\n elif isinstance(exp_pair[1], set):\n # om holder ikke er hc\n #print candidates\n if expt in candidates:\n if not exp_pair[1].intersection(candidates[expt]):\n counters['holder_not_in_candidate_head'] += 1\n cand_exists = False\n for cand in candidates[expt]:\n if exp_pair[1].intersection(get_subtree(sent, cand, transitive=True)):\n cand_exists = True\n if not cand_exists:\n counters['holder_not_in_candidates'] += 1\n counters['holder_not_in_candidates' + exp_pair[2]] += 1\n stats['holders_not_in_candidates'].append({'candidates': candidates[expt],\n 'exp_pair': exp_pair})\n else:\n cand_exists = False\n counters['ignore_count'] += 1\n counters['holder not in candidates - special case'] += 1\n #if cand_exists:\n # For prediction:\n elif isinstance(exp_pair[1], OrderedDict):\n if expt in candidates:\n holdermax = argmaxcxh(exp_pair[1], candidates[expt])\n if not holdermax[0]:\n cand_exists = False\n counters['ignore_count'] += 1\n else:\n cand_exists = False\n counters['expt_not_in_candidates - new'] += 1\n stats['expt_not_in_candidates'].append({'sent': sent_i,\n 'exp_pair': exp_pair})\n else:\n raise Exception('exp_pair[1] of unknown type: {}'.format(exp_pair[1]))\n\n if not predict or cand_exists:\n # we don't need to count false predicted holders, the p. 
sum is already\n # made, but we need these for training\n \n # ext-classifiers (w/imp)\n # labels\n if exp_pair[1] == 'w':\n labels[expt + 'w'].append(True)\n labels[expt + 'implicit'].append(False)\n elif exp_pair[1] == 'implicit':\n labels[expt + 'w'].append(False)\n labels[expt + 'implicit'].append(True)\n else:\n labels[expt + 'w'].append(False)\n labels[expt + 'implicit'].append(False)\n\n # Features\n featuresdict = {}\n ex_head = getex_head(exp_pair[0], sent)\n featuresdict['ex_head_word'] = sent[ex_head-1]['form']\n featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']\n featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']\n tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)\n if tmp:\n featuresdict['dom_ex_type'] = tmp\n featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])\n featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']\n features[expt + 'w'].append(featuresdict)\n #features[expt + 'implicit'].append(featuresdict)\n pos[expt + 'w'].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_gold': exp_pair[1],\n 'holder_sys': 'w'})\n pos[expt + 'implicit'].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_gold': exp_pair[1],\n 'holder_sys': 'implicit'})\n\n if cand_exists:\n # internals\n if expt in candidates:\n featuresandlabeladded = False\n for cand in candidates[expt]:\n if args.restrict == 'sameexp' and cand in exp_pair[0]: #get_subtree(sent, cand, transitive=True)):\n pass\n else:\n featuresdict = {}\n if holder_set:\n featuresandlabeladded = True\n\n # labels\n if isinstance(exp_pair[1], OrderedDict):\n label = cand_in_ghodct(cand, exp_pair[1])\n if isinstance(exp_pair[1], set):\n label = cand in exp_pair[1]\n elif isinstance(exp_pair[1], str):\n label = cand == exp_pair[1]\n labels[expt].append(label)\n\n # positions\n pos[expt].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_sys': get_subtree(sent, cand, transitive=True),\n 'holder_gold': exp_pair[1],\n 'coref_gold': exp_pair[3],\n 'exptype' : expt\n }) \n\n # features\n ex_head = getex_head(exp_pair[0], sent)\n featuresdict['synt_path'] = syntactic_path(cand, ex_head,\n sent, paths=paths)\n if semantic:\n tmp = shallow_sem_relation(cand-1, ex_head-1, sent)\n if tmp:\n featuresdict['shal_sem_rel'] = tmp\n featuresdict['ex_head_word'] = sent[ex_head-1]['form']\n featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']\n featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']\n featuresdict['cand_head_word'] = sent[cand-1]['form']\n featuresdict['cand_head_pos'] = sent[cand-1]['pos']\n tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)\n if tmp:\n featuresdict['dom_ex_type'] = tmp\n featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])\n if cand > 1:\n featuresdict['context_r_word'] = sent[cand-2]['form']\n featuresdict['context_r_pos'] = sent[cand-2]['pos']\n if cand < len(sent):\n featuresdict['context_l_word'] = sent[cand]['form']\n featuresdict['context_l_pos'] = sent[cand]['pos']\n featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']\n \n features[expt].append(featuresdict)\n else:\n counters[\"expt_not_in_candidates\"] += 1\n counters[\"expt_not_in_candidates\" + expt] += 1\n\n stats['positions'] = pos\n return features, labels, stats", "def extract_features(\n img,\n n_sigmas,\n multichannel=True,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16,\n):\n if multichannel: #img.ndim == 3 and multichannel:\n all_results = (\n extract_features_2d(\n dim,\n img[..., dim],\n n_sigmas,\n 
intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n for dim in range(img.shape[-1])\n )\n features = list(itertools.chain.from_iterable(all_results))\n else:\n features = extract_features_2d(0,\n img,\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Feature extraction complete')\n\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n logging.info('Memory mapping features to temporary file')\n\n features = memmap_feats(features)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return features #np.array(features)", "def extract_feat(self, img):\n xb = self.backbone(img)\n if self.with_neck:\n xn = self.neck(xb)\n #for xx in xb:\n # print(xx.shape)\n # print(xb[2].shape)\n return [xb[2]], xn", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def get_features(self, feature_type=\"all\"):\n # if exists(path=\"data.csv\"):\n # return pd.read_csv(\"data.csv\")\n # else:\n # reading through directory\n for file_path in self.list_news_path:\n with open(file_path, 'r') as f:\n\n # open document to read and assign to doc\n doc = json.load(f)\n # skip the empty title or body\n if doc['title'] == \"\" or doc['text'] == \"\":\n pass\n else:\n # to extract all data from news content\n if feature_type == \"all\":\n news = doc['title'] + doc['text']\n\n # preprocesses news content\n words = preprocess(news)\n yield words\n\n # to extract title and text as a pair\n elif feature_type == \"pair\":\n title = preprocess(doc[\"title\"])\n body = preprocess(doc['text'])\n yield title, body\n # if not title or not body:\n # pass\n # else:\n # yield title, body\n\n # else you only need either title or body\n else:\n assert feature_type in doc.keys(), \"feature not in the document: \" + file_path\n # without stemming\n # CUSTOM_FILTERS = [lambda x: x.lower(), strip_tags, strip_punctuation, strip_multiple_whitespaces,\n # strip_numeric, remove_stopwords]\n\n feature = doc[feature_type]\n words = preprocess(feature)\n # using alternative preprocessing function\n # words = preprocess_string(words, filters=CUSTOM_FILTERS)\n yield words", "def single_img_features(img, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9,\n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True, vis=False):\n # 1) Define an empty list to receive features\n img_features = []\n # 2) Apply colour conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor((img, cv2.COLOR_RGB2HSV))\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor((img, cv2.COLOR_RGB2LUV))\n elif color_space == 'HLS':\n feature_image = 
cv2.cvtColor((img, cv2.COLOR_RGB2HLS))\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor((img, cv2.COLOR_RGB2YUV))\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n # print('converting to YCrCb')\n else:\n feature_image = np.copy(img)\n\n # 3) Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n # 4) Append features to list\n img_features.append(spatial_features)\n # 5) Compute histogram features if flag is set\n if hist_feat == True:\n # Apply color_hist()\n hist_features = color_hist(feature_image, nbins=hist_bins)\n # 6) Append features to list\n img_features.append(hist_features)\n # 7) Compute HOG features if flag is set\n if hog_feat == True:\n # Call get_hog_features() with vis=False, feature_vec=True\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:, :, channel],\n orient, pix_per_cell, cell_per_block,\n vis=False, feature_vec=True))\n hog_features = np.concatenate(hog_features)\n # hog_features = np.ravel(hog_features)\n else:\n if vis == True:\n hog_features, hog_image = get_hog_features(feature_image[:, :, hog_channel], orient,\n pix_per_cell, cell_per_block, vis=True,\n feature_vec=True)\n else:\n hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,\n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n # 8) Append features to list\n img_features.append(hog_features)\n\n # 9) Return concatenated array of features\n if vis == True:\n return np.concatenate(img_features), hog_image\n else:\n return np.concatenate(img_features)", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if 
subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def run_style_transfer(cnn, normalization_mean, normalization_std,\n args, content_layers_default, style_layers_default, num_steps,\n style_weight, content_weight): # default: style_weight = 1e6, content_weight = 1\n content_img = image_loader(args.content, args.img_size)\n style_img = image_loader(args.style, args.img_size)\n input_img = content_img.clone()\n assert style_img.size() == content_img.size(), \\\n \"we need to import style and content images of the same size\"\n \n logprint('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n normalization_mean, normalization_std, style_img, content_img, \n args, content_layers_default, style_layers_default)\n \n if args.fft:\n input_img = fft_image(input_img.shape).to(device, torch.float) # convert to fft parameterization\n optimizer = get_input_optimizer(input_img)\n \n logprint('Optimizing..')\n run = [0]\n while run[0] <= num_steps:\n def closure():\n input_img.data.clamp_(0, 1) # correct the values of updated input image\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for layer_name, sl in style_losses.items():\n style_score += sl.loss\n if args.plot_feature and run[0] == num_steps: # visualize feature maps at the last iter\n analyze_gram(sl.gram, layer_name) # analyze the gram matrix, like SVD analysis\n visualize_feature_map(sl.feat, layer_id=layer_name, save_dir=logger.gen_img_path, prefix=prefix, ext=args.ext)\n\n for layer_name, cl in style_losses.items():\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n loss = style_score + content_score\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n logprint(\"run {}:\".format(run))\n logprint('Style Loss : {:4f} Content Loss: {:4f}'.format(style_score.item(), content_score.item()))\n return style_score + content_score\n\n optimizer.step(closure)\n if run[0] % 100 == 0:\n input_img.data.clamp_(0, 1)\n content_name = os.path.split(args.content)[1].split('.')[0] \n style_name = os.path.split(args.style)[1].split('.')[0]\n out_path = \"%s/%s__%s__%s_iter%d.jpg\" % (logger.gen_img_path, content_name, style_name, args.net, run[0])\n vutils.save_image(input_img, out_path)", "def _getFeatures(self, image):\n\n self.model.eval()\n lin_block = 0\n blockwise_features = [image]\n feature = image\n\n for m in self.model.modules():\n # Assume modules are arranged in \"chronological\" fashion\n\n if isinstance(m, nn.ReLU):\n # Get pre-ReLU activations for conv layers\n if len(feature.size()) == 4:\n blockwise_features.append(feature)\n\n if linearity_test(m) is not None:\n if isinstance(m, nn.Linear):\n feature = feature.view(feature.size(0),-1)\n feature = m(feature)\n\n return feature, blockwise_features", "def get_style1_features(self):\n return self.style1_features", "def get_features(self) -> Generator[np.ndarray, None, 
None]:\n for text in self.texts:\n yield embed(text)", "def _extract_features(self, a_rel, a_parses):\n feats = {}\n doc_id = a_rel[DOC_ID]\n toks_pos1 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG1)\n toks_pos2 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG2)\n self._get_product_rules(feats, doc_id, a_rel, a_parses)\n self._get_dep_rules(feats, doc_id, a_rel, a_parses)\n self._get_first_last_toks(feats, toks_pos1, toks_pos2)\n self._get_modality(feats, toks_pos1, toks_pos2)\n self._get_vb_class(feats, toks_pos1, toks_pos2)\n self._get_brown_clusters(feats, toks_pos1, toks_pos2)\n self._get_inquirer(feats, toks_pos1, toks_pos2)\n self._get_MPQA(feats, toks_pos1, toks_pos2)\n return feats", "def extract_features(self, inputs):\n x = self.conv1(inputs)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n return x", "def extract_features(self, inputs):\n x = self.conv1(inputs)\n x = self.maxpool1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.maxpool2(x)\n\n x = self.inception3a(x)\n x = self.inception3b(x)\n x = self.maxpool3(x)\n x = self.inception4a(x)\n\n x = self.inception4b(x)\n x = self.inception4c(x)\n x = self.inception4d(x)\n\n x = self.inception4e(x)\n x = self.maxpool4(x)\n x = self.inception5a(x)\n x = self.inception5b(x)\n x = self.avgpool(x)\n x = torch.flatten(x,1)\n x = self.dropout(x)\n return x", "def fit_style_transfer(style_image, content_image, style_weight=1e-2, content_weight=1e-4, \n optimizer='adam', epochs=1, steps_per_epoch=1):\n\n images = []\n step = 0\n\n # get the style image features \n style_targets = get_style_image_features(style_image)\n \n # get the content image features\n content_targets = get_content_image_features(content_image)\n\n # initialize the generated image for updates\n generated_image = tf.cast(content_image, dtype=tf.float32)\n generated_image = tf.Variable(generated_image) \n \n # collect the image updates starting from the content image\n images.append(content_image)\n \n for n in range(epochs):\n for m in range(steps_per_epoch):\n step += 1\n \n ### START CODE HERE ###\n # Update the image with the style using the function that you defined\n \n update_image_with_style(image=generated_image,\n style_targets=style_targets,\n content_targets=content_targets,\n style_weight=style_weight,\n content_weight=content_weight,\n optimizer=optimizer)\n\n ### END CODE HERE\n\n print(\".\", end='')\n if (m + 1) % 10 == 0:\n images.append(generated_image)\n \n # display the current stylized image\n clear_output(wait=True)\n display_image = tensor_to_image(generated_image)\n display_fn(display_image)\n\n # append to the image collection for visualization later\n images.append(generated_image)\n print(\"Train step: {}\".format(step))\n \n # convert to uint8 (expected dtype for images with pixels in the range [0,255])\n generated_image = tf.cast(generated_image, dtype=tf.uint8)\n \n return generated_image, images", "def precompute_features(imgs:list, GTs:list, phi:models) -> list:\n G_Ts = [1 - GTn for GTn in GTs]\n IGms = [GTn * In for GTn, In in zip(GTs, imgs)] \n I_Gms = [G_Tn * In for G_Tn, In in zip(G_Ts, imgs)] \n features = [(phi(IGm), phi(I_Gm)) for IGm, I_Gm in zip(IGms, I_Gms)]\n return features", "def compute_features(depc, gid_list, config=None):\n logger.info('[ibs] Preprocess Features')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n 
ibs.assert_valid_gids(gid_list)\n ######################################################################################\n\n if config['framework'] in ['keras']:\n from keras.preprocessing import image as preprocess_image\n\n thumbnail_config = {\n 'draw_annots': False,\n 'thumbsize': (500, 500),\n }\n thumbpath_list = depc.get(\n 'thumbnails',\n gid_list,\n 'img',\n config=thumbnail_config,\n read_extern=False,\n ensure=True,\n )\n\n target_size = (224, 224)\n if config['model'] in ['vgg', 'vgg16']:\n from keras.applications.vgg16 import VGG16 as MODEL_CLASS\n from keras.applications.vgg16 import preprocess_input\n ######################################################################################\n elif config['model'] in ['vgg19']:\n from keras.applications.vgg19 import VGG19 as MODEL_CLASS\n from keras.applications.vgg19 import preprocess_input\n ######################################################################################\n elif config['model'] in ['resnet']:\n from keras.applications.resnet50 import ResNet50 as MODEL_CLASS # NOQA\n from keras.applications.resnet50 import preprocess_input\n ######################################################################################\n elif config['model'] in ['inception']:\n from keras.applications.inception_v3 import InceptionV3 as MODEL_CLASS # NOQA\n from keras.applications.inception_v3 import preprocess_input\n\n target_size = (299, 299)\n ######################################################################################\n else:\n raise ValueError(\n 'specified feature model is not supported in config = {!r}'.format(config)\n )\n\n # Build model\n model = MODEL_CLASS(include_top=False)\n\n thumbpath_iter = ut.ProgIter(thumbpath_list, lbl='forward inference', bs=True)\n for thumbpath in thumbpath_iter:\n image = preprocess_image.load_img(thumbpath, target_size=target_size)\n image_array = preprocess_image.img_to_array(image)\n image_array = np.expand_dims(image_array, axis=0)\n image_array = preprocess_input(image_array)\n features = model.predict(image_array)\n if config['flatten']:\n features = features.flatten()\n yield (features,)\n elif config['framework'] in ['torch']:\n from wbia.algo.detect import densenet\n\n if config['model'] in ['densenet']:\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (densenet.INPUT_SIZE, densenet.INPUT_SIZE),\n }\n thumbpath_list = ibs.depc_image.get(\n 'thumbnails',\n gid_list,\n 'img',\n config=config_,\n read_extern=False,\n ensure=True,\n )\n feature_list = densenet.features(thumbpath_list)\n else:\n raise ValueError(\n 'specified feature model is not supported in config = {!r}'.format(config)\n )\n\n for feature in feature_list:\n if config['flatten']:\n feature = feature.flatten()\n yield (feature,)\n else:\n raise ValueError(\n 'specified feature framework is not supported in config = {!r}'.format(config)\n )", "def get_features(self, imgfile):\n if self.layer not in self.net.blobs:\n raise TypeError(\"Invalid layer name: \" + self.layer)\n caffeimg = caffe.io.load_image(imgfile)\n self.net.blobs['data'].data[...] = self.transformer.preprocess('data', caffeimg)\n output = self.net.forward()\n return self.net.blobs[self.layer].data[0]", "def extractFeatures(self, datum):\n abstract", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. 
get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def extract_features(image_path: str, features: [DefinitionFeature], labeled_pixels: [LabeledPixel] = None,\n nb_random_pixel_to_add: int = 0) -> FeatureTable:\n image = Image(image_path)\n # If there are not labeled pixel, we are predicting the image,\n # so we will extract all the pixels, so we retrieve them\n if labeled_pixels is None:\n labeled_pixels = set()\n for x in list(range(image.width)):\n for y in list(range(image.height)):\n labeled_pixels.add(LabeledPixel((x, y), 'Unknown'))\n\n if nb_random_pixel_to_add > image.width * image.height - len(labeled_pixels):\n raise AttributeError(\n \"There are not enough pixels to add \" + str(nb_random_pixel_to_add) + \" C0 pixels into the image.\")\n\n # We retrieve the list of features to use\n feature_to_compute = set()\n features = sorted(list(features))\n for feature in features:\n feature_to_compute.add((feature.required_feature, feature.mask_size))\n\n # Add some random pixels\n i = 0\n while i < nb_random_pixel_to_add:\n x, y = random.randint(0, image.width - 1), random.randint(0, image.height - 1)\n old_size = len(labeled_pixels)\n labeled_pixels.add(LabeledPixel((x, y), 'C0'))\n i = i + len(labeled_pixels) - old_size\n result = FeatureTable()\n labeled_pixels = sorted(labeled_pixels)\n\n # For each labeled pixel, we extract the features and add an individual to the result\n for 
labeled_pixel in labeled_pixels:\n dictionary = dict()\n for feature in feature_to_compute:\n dictionary.update(feature[0].compute(labeled_pixel.pixel, image, feature[1]))\n sample = list()\n for feature in features:\n sample.append(dictionary[feature.full_name])\n result.add_individual(\n Individual(labeled_pixel.label, sample, (labeled_pixel.pixel[0], labeled_pixel.pixel[1])))\n return result", "def extract_features_shapenet(images, output_size, use_batch_norm, dropout_keep_prob):\n\n # 4X conv2d + pool blocks\n h = conv2d_pool_block(images, use_batch_norm, dropout_keep_prob, 'same','fe_block_1')\n h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'same','fe_block_2')\n h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'same','fe_block_3')\n h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'same', 'fe_block_4')\n\n # flatten output\n h = tf.contrib.layers.flatten(h)\n\n # dense layer\n h = dense_block(h, output_size, use_batch_norm, dropout_keep_prob, 'fe_dense')\n\n return h", "def get_features(img1,mask1, depth1):\n colors = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)\n img3 = img1.copy()\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img1 = clahe.apply(img1) # Applying Clahe\n kp, des = orb.detectAndCompute(img1, mask=mask1) # Computing ORB features\n kp_pts = np.float32([ kp[m].pt for m in range(len(kp))]).reshape(-1,2)\n # Getting Colors\n col = []\n for i in range(len(kp)):\n col.append(colors[kp_pts[i,1].astype(int), kp_pts[i,0].astype(int)])\n col = np.array(col)\n # Getting 2D points\n kp_2d = []\n for m in range(len(kp)):\n kp_2d.append([int(kp[m].pt[0]), int(kp[m].pt[1])])\n kp_2d = np.array(kp_2d).reshape(-1,2)\n \n # Getting the 3D points\n kp_3d, _, _ = convert_3d(kp_2d, depth1, img3)\n \n # Removing points with Zero depth\n my_ind = np.where(kp_3d[:,2]!=0)[0]\n new_kp_3d = kp_3d[my_ind,:]\n new_kp_2d = kp_2d[my_ind,:]\n new_des = des[my_ind,:]\n new_col = col[my_ind,:]\n \n # Removing the duplicates\n uni_3d = np.unique(new_kp_3d, return_index= True, axis=0)[1]\n new_kp_3d1 = new_kp_3d[uni_3d,:]\n new_kp_2d1 = new_kp_2d[uni_3d,:]\n new_des1 = new_des[uni_3d,:]\n new_col1 = new_col[uni_3d,:]\n return kp_3d, kp_2d, des, col", "def word2features(self,sent, i):\n word = sent[i][0]\n #postag = sent[i][1]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 'word.isdigit()': word.isdigit(),\n 'word.shape()':self.shape(word),\n 'word.isalnum()':word.isalnum(),\n 'word.isalpha()':word.isalpha(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = sent[i - 1][0]\n #postag1 = sent[i - 1][1]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isdigit()': word1.isdigit(),\n '-1:word.isalnum()':word1.isalnum(),\n '-1:word.isalpha()':word1.isalpha(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i > 1:\n word2 = sent[i - 2][0]\n #postag2 = sent[i - 2][1]\n features.update({\n '-2:word.lower()': word2.lower(),\n '-2:word.istitle()': word2.istitle(),\n '-2:word.isupper()': word2.isupper(),\n '-2:word.isdigit()': word2.isdigit(),\n '-2:word.isalnum()': word2.isalnum(),\n '-2:word.isalpha()': word2.isalpha(),\n # '-2:postag': postag2,\n # '-2:postag[:2]': postag2[:2],\n })\n else:\n features['BOS1'] = True\n if i > 2:\n word3 = sent[i - 3][0]\n #postag3 = sent[i - 3][1]\n features.update({\n 
'-3:word.lower()': word3.lower(),\n '-3:word.istitle()': word3.istitle(),\n '-3:word.isupper()': word3.isupper(),\n '-3:word.isdigit()': word3.isdigit(),\n '-3:word.isalnum()': word3.isalnum(),\n '-3:word.isalpha()': word3.isalpha(),\n # '-3:postag': postag3,\n # '-3:postag[:2]': postag3[:2],\n })\n else:\n features['BOS2'] = True\n\n if i > 3:\n word4 = sent[i - 4][0]\n #postag4 = sent[i - 4][1]\n features.update({\n '-4:word.lower()': word4.lower(),\n '-4:word.istitle()': word4.istitle(),\n '-4:word.isupper()': word4.isupper(),\n '-4:word.isdigit()': word4.isdigit(),\n '-4:word.isalnum()': word4.isalnum(),\n '-4:word.isalpha()': word4.isalpha(),\n # '-4:postag': postag4,\n # '-4:postag[:2]': postag4[:2],\n })\n else:\n features['BOS2'] = True\n\n if i < len(sent) - 1:\n word1 = sent[i + 1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isdigit()': word1.isdigit(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.isalpha()': word1.isalpha(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n if i < len(sent) - 2:\n word12 = sent[i + 2][0]\n #postag12 = sent[i + 2][1]\n features.update({\n '+2:word.lower()': word12.lower(),\n '+2:word.istitle()': word12.istitle(),\n '+2:word.isupper()': word12.isupper(),\n '+2:word.isdigit()': word12.isdigit(),\n '+2:word.isalnum()': word12.isalnum(),\n '+2:word.isalpha()': word12.isalpha(),\n # '+2:postag': postag12,\n # '+2:postag[:2]': postag12[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 3:\n word13 = sent[i + 3][0]\n #postag13 = sent[i + 3][1]\n features.update({\n '+3:word.lower()': word13.lower(),\n '+3:word.istitle()': word13.istitle(),\n '+3:word.isupper()': word13.isupper(),\n '+3:word.isdigit()': word13.isdigit(),\n '+3:word.isalnum()': word13.isalnum(),\n '+3:word.isalpha()': word13.isalpha(),\n # '+3:postag': postag13,\n # '+3:postag[:2]': postag13[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 4:\n word14 = sent[i + 4][0]\n #postag14 = sent[i + 4][1]\n features.update({\n '+4:word.lower()': word14.lower(),\n '+4:word.istitle()': word14.istitle(),\n '+4:word.isupper()': word14.isupper(),\n '+4:word.isdigit()': word14.isdigit(),\n '+4:word.isalnum()': word14.isalnum(),\n '+4:word.isalpha()': word14.isalpha(),\n # '+4:postag': postag14,\n # '+4:postag[:2]': postag14[:2],\n })\n else:\n features['EOS2'] = True\n return features", "def feature_extraction(img, feature):\n\n if feature == 'HoG':\n # HoG parameters\n\n # In the case of the Hog Feature, we already given the base parameters for using hog feature function.\n # TA - You can just use that parameter with each subdivide image (which has image grid size * image grid size)\n # Thank you for the reply. Does it mean to divide the image into 20x20 size sub-images and perform the feature extraction on each image??\n # TA - Yes. In the SIFT, image grid size is different.\n\n win_size = (32, 32)\n block_size = (32, 32)\n block_stride = (16, 16)\n cell_size = (16, 16)\n\n nbins = 9\n deriv_aperture = 1\n win_sigma = 4\n histogram_norm_type = 0\n l2_hys_threshold = 2.0000000000000001e-01\n gamma_correction = 0\n nlevels = 64\n\n # Your code here. 
You should also change the return value.\n\n # sample visualizing\n # cv2.imshow('img', img)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n hog = cv2.HOGDescriptor(win_size,\n block_size,\n block_stride,\n cell_size,\n nbins,\n deriv_aperture,\n win_sigma,\n histogram_norm_type,\n l2_hys_threshold,\n gamma_correction,\n nlevels)\n\n # additional parameters\n\n #hist = hog.compute(gray,winStride,padding,locations)\n\n #TODO: Check if this is valid???\n\n hist = hog.compute(gray)\n hist_resized = np.resize(hist, (int(len(hist)/36), 36))\n hist_resized\n return hist_resized\n\n elif feature == 'SIFT':\n\n # Your code here. You should also change the return value.\n\n #input image size 240 * 200 ==> divide H, W by 20 ==> 12 * 10 = 120\n #in case of this input image, the number of feature is 120.\n #So the number of feature is changed according to input image size.\n\n #IF PROBLEMS WITH DEPENDENCIES: pip3 install opencv-contrib-python==3.4.2.16\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(gray, None)\n\n return des", "def ExtractDataSetFeatures(dir_name,imageExt ='pgm',features_type='hog',cnn_model_path='C:\\\\Users\\\\IssaMawad\\\\Documents\\\\DSIP_ML_Project\\\\cnn\\\\models\\\\20170512-110547.pb'):\n if(features_type=='facenet'):\n return EvaluateCNN(dir_name,cnn_model_path)\n features = [];\n classes = [];\n for subdir, dirs, files in os.walk(dir_name):\n for dir in dirs:\n dirFull = os.path.join( dir_name,dir)\n for innerSubDir,innerDirs,innerFiles in os.walk(dirFull):\n #if(len(innerFiles)<8):\n # continue;\n #print(dir_name)\n for file in innerFiles:\n if(not file.endswith(imageExt)):\n continue;\n fullFile = os.path.join(dirFull,file)\n if(features_type=='hog'):\n features.append(extractHOGFeatures(ReadImage(fullFile)))\n if(features_type=='gbrdct'):\n features.append(extractGaborDCT(ReadImage(fullFile)))\n if(features_type=='lgbphs'):\n features.append(extractLGBPHS(ReadImage(fullFile)))\n if(features_type=='gabor'):\n features.append(extractGabor(ReadImage(fullFile)))\n #if(features_type=='gbrzk'):\n # features.append(extractGaborZernike(ReadImage(fullFile)))\n if(features_type=='dum'):\n features.append(ReadImage(fullFile).ravel())\n classes.append(dir)\n return np.asarray(features),np.asanyarray(classes)", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = pandas.DataFrame(features)\n return features", "def get_language_features(self, queries, gram):\n queries_dim = 
queries.dim()\n\n if queries_dim==3:\n N = queries.size(0)\n M = queries.size(1)\n num_words = self.num_words[gram]\n queries = queries.view(-1, num_words) # resize (N,M,k) -> (N*M,k)\n\n language_feats = self.language_nets[self.gram_id[gram]](queries)\n\n if queries_dim==3:\n language_feats = language_feats.view(N, M, -1)\n\n return language_feats", "def get_features(image, model, layers=None):\n ## content representation\n if layers is None:\n layers = {'0': 'conv1_1',\n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1',\n '21': 'conv4_2', \n '28': 'conv5_1'}\n \n features = {}\n x = image\n # model._modules is a dictionary holding each module in the model\n for name, layer in model._modules.items():\n x = layer(x)\n if name in layers:\n features[layers[name]] = x\n \n return features", "def extract_features(img):\n # load models\n model = FeatureExtractor(CFG)\n model.load_model()\n feature_extractor = model.feature_extractor()\n\n # extract features \n print(type(img))\n extracted_features = feature_extractor.predict([img])\n\n # reduce dimension\n pca_model = joblib.load(PCA_MODEL_DIRECTORY)\n reduced_img = pca_model.transform(extracted_features)\n return reduced_img", "def get_style2_features(self):\n return self.style2_features", "def parse_features(self, skip=...):\n ...", "def parse_features(self, skip=...):\n ...", "def features(self, img, tasks):\n ensemble_probs = []\n\n model_iterable = self.tasks2models[tasks]\n ensemble_results = []\n for model in model_iterable():\n individual_feats = model.module.features2(img)\n ensemble_results.append(individual_feats)\n\n return torch.stack(ensemble_results)", "def style_loss(feats, style_layers, style_targets, style_weights):\n # Hint: you can do this with one for loop over the style layers, and should\n # not be short code (~5 lines). 
You will need to use your gram_matrix function.\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "def _extract_proposal_features(self, preprocessed_inputs, scope):\n del scope\n\n if len(preprocessed_inputs.get_shape().as_list()) != 4:\n raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '\n 'tensor of shape %s' % preprocessed_inputs.get_shape())\n\n with slim.arg_scope(pnasnet_large_arg_scope_for_detection(\n is_batch_norm_training=self._train_batch_norm)):\n with arg_scope([slim.conv2d,\n slim.batch_norm,\n slim.separable_conv2d],\n reuse=self._reuse_weights):\n _, end_points = pnasnet.build_pnasnet_large(\n preprocessed_inputs, num_classes=None,\n is_training=self._is_training,\n final_endpoint='Cell_7')\n\n # Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160.\n # Cell_7 is the last cell before second reduction.\n rpn_feature_map = tf.concat([end_points['Cell_6'],\n end_points['Cell_7']], 3)\n\n # pnasnet.py does not maintain the batch size in the first dimension.\n # This work around permits us retaining the batch for below.\n batch = preprocessed_inputs.get_shape().as_list()[0]\n shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]\n rpn_feature_map_shape = [batch] + shape_without_batch\n rpn_feature_map.set_shape(rpn_feature_map_shape)\n\n return rpn_feature_map, end_points", "def getFeatures(self,layer): \n numFeatures = layer.GetFeatureCount()\n features = []\n for i in range(numFeatures):\n feature = layer.GetNextFeature()\n if feature is not None:\n geomRef = feature.GetGeometryRef()\n if((geomRef is not None and geomRef.GetPointCount() != 0)):\n features.append(self.getFeatureInfo(feature))\n return features", "def _preprocess_image(self, sess, image, is_first_frame):\n features, self.cropped_input_image = sess.run([self.features, self.image_after_crop], feed_dict = {\n self.image_ph: image,\n self.cropbox_ph: self.cropbox,\n })\n #by default should be [8, 8, 512]\n feature_dim = features.shape\n #logging.debug(feature_dim)\n num_features, num_channels = feature_dim\n\n if is_first_frame:\n #pad [num_features, 1]\n pad = np.zeros((num_features, 1))\n features = np.concatenate([features, pad], 1)\n #generate gt\n #gt shape: [num_features, 1]\n gt = np.reshape(preprocess.generate_gt(\n preprocess.apply_transformation(self.normalized_bbox,\n self.transformation),\n FLAGS.cropbox_grid, FLAGS.bbox_grid), [-1, 1])\n features = np.concatenate([features, gt], 1)\n else:\n #this pad contains pad and dummy gt\n pad = np.zeros((num_features, 2))\n features = np.concatenate([features, pad], 1)\n #[...,0,1,0]\n frame_delimiter = np.concatenate([\n np.zeros((1, num_channels)),\n np.ones((1, 1)),\n np.zeros((1, 1))], 1)\n features = np.concatenate([frame_delimiter, features], 0)\n return features", "def user_features(posts):\n \n result = []\n for post in posts:\n #n_comments = get_hostile_indices(post)[0] + 1\n n_comments = post['n_comments_observed']\n feature_list = []\n num_mentioned = 0\n user_list = post['users'][:n_comments]\n comment_list = post['comments'][:n_comments]\n ratio_users = len(set(user_list))/len(user_list)\n for c in comment_list:\n flag = has_mentioned(c) \n if flag:\n num_mentioned += 1\n \n ratio_mentioned = num_mentioned/len(user_list)\n \n feature_list.append(ratio_users)\n feature_list.append(ratio_mentioned)\n \n result.append(feature_list)\n\n X_matrix = np.asarray(result)\n headers = [\"other_features_1st\", 
\"other_features_2st\"]\n \n return X_matrix, headers", "def calculateFeatures( self,grayscaleImage, labelImage, featureClasses, settings, enabledImageTypes):\n # type: (Simple ITK image object, Simple ITK image object, list, dict, dict) -> dict\n print('Calculating features for %s', featureClasses)\n print('Instantiating the extractor')\n extractor = featureextractor.RadiomicsFeaturesExtractor(**settings)\n extractor.disableAllFeatures()\n extractor.enableAllInputImages()\n for feature in featureClasses:\n extractor.enableFeatureClassByName(feature)\n # extractor.disableAllImageTypes()\n # for imageType in enabledImageTypes:\n # extractor.enableImageTypeByName(imageType, customArgs=enabledImageTypes[imageType])\n print('Starting feature calculation')\n featureValues = {}\n try:\n featureValues = extractor.execute(grayscaleImage, labelImage)\n except:\n print('pyradiomics feature extractor failed')\n # traceback.print_exc()\n print('Features calculated')\n return featureValues", "def sgd_features(filepath=None):\n\n if filepath == None:\n filepath=load_sgd_tab()\n\n arabic_to_roman_dict=chromosomename_roman_to_arabic()[0]\n \n with open(filepath) as f:\n lines = f.readlines()\n\n\n feature_list = []\n feature_orf_dict = {}\n feature_ars_dict = {}\n feature_telomere_dict = {}\n feature_ltr_dict = {}\n feature_centromere_dict = {}\n feature_Xelement_dict = {}\n feature_intron_dict = {}\n feature_ncrna_dict = {}\n feature_ncexon_dict = {}\n feature_trna_dict = {}\n feature_snorna_dict = {}\n feature_teg_dict = {}\n feature_5p_utrintron_dict = {}\n feature_mas_dict = {}\n feature_snrna_dict = {}\n feature_rrna_dict = {}\n feature_ets_dict = {}\n feature_its_dict = {}\n feature_oor_dict = {}\n feature_telrna_dict = {}\n \n for line in lines:\n l = line.strip('\\n').split('\\t')\n if not l[1] in feature_list:\n feature_list.append(l[1])\n\n if not l[8].endswith('micron') and not l[8] == '':\n chromosome = arabic_to_roman_dict.get(int(l[8]))\n if l[1] == 'ORF':\n feature_orf_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ARS':\n feature_ars_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomere':\n feature_telomere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'long_terminal_repeat':\n feature_ltr_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'centromere':\n feature_centromere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'X_element':\n feature_Xelement_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'intron':\n feature_intron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ncRNA_gene':\n feature_ncrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'noncoding_exon':\n feature_ncexon_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'tRNA_gene':\n feature_trna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snoRNA_gene':\n feature_snorna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'transposable_element_gene':\n feature_teg_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'five_prime_UTR_intron':\n feature_5p_utrintron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'matrix_attachment_site':\n feature_mas_dict[l[3]] = [l[1], l[2], l[4], 
l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snRNA_gene':\n feature_snrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'rRNA_gene':\n feature_rrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'external_transcribed_spacer_region':\n feature_ets_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'internal_transcribed_spacer_region':\n feature_its_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'origin_of_replication':\n feature_oor_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomerase_RNA_gene':\n feature_telrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n\n\n \n\n\n genomicregions_list = ['ORF', 'ARS', 'Telomere', 'long_terminal_repeat',\n 'Centromere', 'X_element', 'Intron', 'ncRNA_gene',\n 'Noncoding_exon', 'tRNA_gene', 'snoRNA_gene',\n 'transposable_element_gene', 'five_prime_UTR_intron',\n 'matrix_attachment_site', 'snRNA_gene', 'rRNA_gene',\n 'external_transcribed_spacer_region',\n 'internal_transcribed_spacer_region',\n 'origin_of_replication', 'telomerase_RNA_gene']\n\n\n return(genomicregions_list, feature_orf_dict, feature_ars_dict, feature_telomere_dict,\n feature_ltr_dict, feature_centromere_dict, feature_Xelement_dict, feature_intron_dict,\n feature_ncrna_dict, feature_ncexon_dict, feature_trna_dict,\n feature_snorna_dict, feature_teg_dict, feature_5p_utrintron_dict,\n feature_mas_dict, feature_snrna_dict, feature_rrna_dict,\n feature_ets_dict, feature_its_dict, feature_oor_dict,\n feature_telrna_dict)", "def get_all_features(train_data, test_data):\n #train_wc_matrix, test_wc_matrix = get_word_count_features(train_data, test_data)\n train_idf_matrix, test_idf_matrix = get_idf_features(train_data, test_data)\n train_ngram_matrix, test_ngram_matrix = get_ngram_features(train_data, test_data)\n # train_liwc_matrix, test_liwc_matrix = get_liwc_features(train_data, test_data)\n return sparse.hstack([train_idf_matrix, train_ngram_matrix]), \\\n sparse.hstack([test_idf_matrix, test_ngram_matrix])", "def generateFeatures(self, data):\n pass", "def export_coreml(self, filename):\n import coremltools\n # First define three internal helper functions\n\n\n # Internal helper function\n def _create_vision_feature_print_screen():\n prob_name = self.target + 'Probability'\n\n #\n # Setup the top level (pipeline classifier) spec\n #\n top_spec = coremltools.proto.Model_pb2.Model()\n top_spec.specificationVersion = 3\n\n desc = top_spec.description\n desc.output.add().name = prob_name\n desc.output.add().name = self.target\n\n desc.predictedFeatureName = self.target\n desc.predictedProbabilitiesName = prob_name\n\n input = desc.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')\n input.type.imageType.colorSpace = BGR_VALUE\n\n #\n # VisionFeaturePrint extractor\n #\n pipelineClassifier = top_spec.pipelineClassifier\n scene_print = pipelineClassifier.pipeline.models.add()\n scene_print.specificationVersion = 3\n scene_print.visionFeaturePrint.scene.version = 1\n\n input = scene_print.description.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n input.type.imageType.colorSpace = BGR_VALUE\n\n output = scene_print.description.output.add()\n output.name = \"output_name\"\n 
DOUBLE_ARRAY_VALUE = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')\n output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n output.type.multiArrayType.shape.append(2048)\n\n #\n # Neural Network Classifier, which is just logistic regression, in order to use GPUs\n #\n temp = top_spec.pipelineClassifier.pipeline.models.add()\n temp.specificationVersion = 3\n\n # Empty inner product layer\n nn_spec = temp.neuralNetworkClassifier\n feature_layer = nn_spec.layers.add()\n feature_layer.name = \"feature_layer\"\n feature_layer.input.append(\"output_name\")\n feature_layer.output.append(\"softmax_input\")\n fc_layer_params = feature_layer.innerProduct\n fc_layer_params.inputChannels = 2048\n\n # Softmax layer\n softmax = nn_spec.layers.add()\n softmax.name = \"softmax\"\n softmax.softmax.MergeFromString(b'')\n softmax.input.append(\"softmax_input\")\n softmax.output.append(prob_name)\n\n input = temp.description.input.add()\n input.name = \"output_name\"\n input.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n input.type.multiArrayType.shape.append(2048)\n\n # Set outputs\n desc = temp.description\n prob_output = desc.output.add()\n prob_output.name = prob_name\n label_output = desc.output.add()\n label_output.name = self.target\n\n if type(self.classifier.classes[0]) == int:\n prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')\n label_output.type.int64Type.MergeFromString(b'')\n else:\n prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')\n label_output.type.stringType.MergeFromString(b'')\n\n temp.description.predictedFeatureName = self.target\n temp.description.predictedProbabilitiesName = prob_name\n\n return top_spec\n\n\n # Internal helper function\n def _update_last_two_layers(nn_spec):\n # Replace the softmax layer with new coeffients\n num_classes = self.num_classes\n fc_layer = nn_spec.layers[-2]\n fc_layer_params = fc_layer.innerProduct\n fc_layer_params.outputChannels = self.classifier.num_classes\n inputChannels = fc_layer_params.inputChannels\n fc_layer_params.hasBias = True\n\n coefs = self.classifier.coefficients\n weights = fc_layer_params.weights\n bias = fc_layer_params.bias\n del weights.floatValue[:]\n del bias.floatValue[:]\n\n import numpy as np\n W = np.array(coefs[coefs['index'] != None]['value'], ndmin = 2).reshape(\n inputChannels, num_classes - 1, order = 'F')\n b = coefs[coefs['index'] == None]['value']\n Wa = np.hstack((np.zeros((inputChannels, 1)), W))\n weights.floatValue.extend(Wa.flatten(order = 'F'))\n bias.floatValue.extend([0.0] + list(b))\n\n # Internal helper function\n def _set_inputs_outputs_and_metadata(spec, nn_spec):\n # Replace the classifier with the new classes\n class_labels = self.classifier.classes\n\n probOutput = spec.description.output[0]\n classLabel = spec.description.output[1]\n probOutput.type.dictionaryType.MergeFromString(b'')\n if type(class_labels[0]) == int:\n nn_spec.ClearField('int64ClassLabels')\n probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')\n classLabel.type.int64Type.MergeFromString(b'')\n del nn_spec.int64ClassLabels.vector[:]\n for c in class_labels:\n nn_spec.int64ClassLabels.vector.append(c)\n else:\n nn_spec.ClearField('stringClassLabels')\n probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')\n classLabel.type.stringType.MergeFromString(b'')\n del nn_spec.stringClassLabels.vector[:]\n for c in class_labels:\n nn_spec.stringClassLabels.vector.append(c)\n\n prob_name = self.target + 'Probability'\n label_name = 
self.target\n old_output_name = nn_spec.layers[-1].name\n coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)\n coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)\n if nn_spec.layers[-1].name == old_output_name:\n nn_spec.layers[-1].name = prob_name\n if nn_spec.labelProbabilityLayerName == old_output_name:\n nn_spec.labelProbabilityLayerName = prob_name\n coremltools.models.utils.rename_feature(spec, 'data', self.feature)\n if len(nn_spec.preprocessing) > 0:\n nn_spec.preprocessing[0].featureName = self.feature\n\n mlmodel = coremltools.models.MLModel(spec)\n model_type = 'image classifier (%s)' % self.model\n mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)\n mlmodel.input_description[self.feature] = u'Input image'\n mlmodel.output_description[prob_name] = 'Prediction probabilities'\n mlmodel.output_description[label_name] = 'Class label of top prediction'\n _coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {\n 'model': self.model,\n 'target': self.target,\n 'features': self.feature,\n 'max_iterations': str(self.max_iterations),\n }, version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION)\n\n return mlmodel\n\n\n # main part of the export_coreml function\n if self.model in _pre_trained_models.MODELS:\n ptModel = _pre_trained_models.MODELS[self.model]()\n feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel)\n\n coreml_model = feature_extractor.get_coreml_model()\n spec = coreml_model.get_spec()\n nn_spec = spec.neuralNetworkClassifier\n else: # model == VisionFeaturePrint_Screen\n spec = _create_vision_feature_print_screen()\n nn_spec = spec.pipelineClassifier.pipeline.models[1].neuralNetworkClassifier\n\n _update_last_two_layers(nn_spec)\n mlmodel = _set_inputs_outputs_and_metadata(spec, nn_spec)\n mlmodel.save(filename)", "def pos_features(compactcorpus):\n start=time()\n \n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,25,False),8)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,25,False),8)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,25,False),8)\n #tag_bigrams =common_but_unique(ngrams_dict(2,authors,compact_to_tag(compactcorpus),20,False),15) #PAS OP Duurt erg lang om te gebruiken (dus ook nog niet getest...ivm tijd)\n skipgrams = common_but_unique(skipgrams_dict(authors,compactcorpus,10),10)\n\n minimal_wrdoccurence = [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n minimal_trigram_occurence = [\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n minimal_bigram_occurence = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)]\n #minimal_skipgram_occurence = [\"skip:(\"+str(skip[0])+\",\"+str(skip[1])+\",\"+str(skip[2])+\")>\"+str(num) for skip in skipgrams for num in range(0,1)]\n\n features = minimal_bigram_occurence + minimal_wrdoccurence + minimal_trigram_occurence #+ minimal_skipgram_occurence\n print \"pos feat in:\"+str(time()-start)\n return features", "def findFeatures(self):\n\t\tpass", "def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 
0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def build_features(self, example):\n context_idxs = np.full([self._para_limit],\n fill_value=self._word_vocab[self._word_vocab.padding_token],\n dtype=np.float32)\n\n ctx_chars_idxs = np.full([self._para_limit, self._char_limit],\n fill_value=self._char_vocab[self._char_vocab.padding_token],\n dtype=np.float32)\n\n ques_idxs = np.full([self._ques_limit],\n fill_value=self._word_vocab[self._word_vocab.padding_token],\n dtype=np.float32)\n\n ques_char_idxs = np.full([self._ques_limit, self._char_limit],\n 
fill_value=self._char_vocab[self._char_vocab.padding_token],\n dtype=np.float32)\n\n context_len = min(len(example['context_tokens']), self._para_limit)\n context_idxs[:context_len] = self._get_words_emb(example['context_tokens'][:context_len])\n\n ques_len = min(len(example['ques_tokens']), self._ques_limit)\n ques_idxs[:ques_len] = self._get_words_emb(example['ques_tokens'][:ques_len])\n\n for i in range(0, context_len):\n char_len = min(len(example['context_chars'][i]), self._char_limit)\n ctx_chars_idxs[i, :char_len] = self._char_vocab[example['context_chars'][i][:char_len]]\n\n for i in range(0, ques_len):\n char_len = min(len(example['ques_chars'][i]), self._char_limit)\n ques_char_idxs[i, :char_len] = self._char_vocab[example['ques_tokens'][i][:char_len]]\n\n start, end = example['y1s'][-1], example['y2s'][-1]\n\n record = (example['id'],\n example['record_idx'],\n context_idxs,\n ques_idxs,\n ctx_chars_idxs,\n ques_char_idxs,\n start,\n end,\n example['context'],\n example['spans'])\n\n return record", "def computeTemplateFeatures(self, templates, mode=''):\n pass", "def generate_feature_stack(image, features_specification : Union[str, PredefinedFeatureSet] = None):\n\n image = cle.push(image)\n\n # default features\n if features_specification is None:\n blurred = cle.gaussian_blur(image, sigma_x=2, sigma_y=2, sigma_z=2)\n edges = cle.sobel(blurred)\n stack = [\n image,\n blurred,\n edges\n ]\n\n return stack\n if isinstance(features_specification, PredefinedFeatureSet):\n features_specification = features_specification.value\n\n while \" \" in features_specification:\n features_specification = features_specification.replace(\" \", \" \")\n while \"\\t\" in features_specification:\n features_specification = features_specification.replace(\"\\t\", \" \")\n\n features_specs = features_specification.split(\" \")\n generated_features = {}\n\n result_features = []\n\n for spec in features_specs:\n if spec.lower() == 'original':\n generated_features['original'] = image\n result_features.append(image)\n elif \"=\" in spec:\n temp = spec.split(\"=\")\n operation = temp[0]\n numeric_parameter = float(temp[1])\n\n if not hasattr(cle, operation) and \"_of_\" in operation:\n temp = operation.split(\"_of_\")\n outer_operation = temp[0]\n inner_operation = temp[1]\n\n if (inner_operation+\"=\"+str(numeric_parameter)) not in generated_features.keys():\n new_image = cle.create_like(image)\n _apply_operation(inner_operation, image, new_image, numeric_parameter)\n generated_features[inner_operation+\"=\"+str(numeric_parameter)] = new_image\n\n if (operation+\"=\"+str(numeric_parameter)) not in generated_features.keys():\n new_image2 = cle.create_like(image)\n _apply_operation(outer_operation, generated_features[inner_operation+\"=\"+str(numeric_parameter)], new_image2, numeric_parameter)\n generated_features[operation+\"=\"+str(numeric_parameter)] = new_image2\n else:\n if (operation+\"=\"+str(numeric_parameter)) not in generated_features:\n new_image = cle.create_like(image)\n _apply_operation(operation, image, new_image, numeric_parameter)\n generated_features[operation+\"=\"+str(numeric_parameter)] = new_image\n\n result_features.append(generated_features[operation+\"=\"+str(numeric_parameter)])\n\n return result_features", "def newsgroup_featurize(data_list):\n # TODO: Implement featurization of input.\n all_text = data_list[\"train\"][\"input\"] + data_list[\"test\"][\"input\"] + data_list[\"dev\"][\"input\"]\n word_dict = word_count(all_text)\n bow_noun_features = bow_noun(word_dict) # 11,925 
features\n train_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"train\"][\"input\"]])\n dev_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"dev\"][\"input\"]])\n test_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"test\"][\"input\"]])\n return train_input, dev_input, test_input" ]
[ "0.78582704", "0.7087954", "0.6903325", "0.6433953", "0.64332956", "0.64121157", "0.6316672", "0.62753826", "0.61490476", "0.6146242", "0.61101943", "0.60842645", "0.6055493", "0.60267216", "0.60207623", "0.6020109", "0.6018783", "0.6010386", "0.5994345", "0.5985522", "0.5985261", "0.5937383", "0.59319365", "0.59129643", "0.5911842", "0.58991164", "0.589174", "0.58887327", "0.58822024", "0.5846836", "0.5834873", "0.58206975", "0.5815763", "0.5811391", "0.5811391", "0.58039314", "0.57672405", "0.5758259", "0.57484305", "0.574476", "0.57388735", "0.5728951", "0.57244736", "0.5711763", "0.5701025", "0.56942636", "0.5676399", "0.5676363", "0.56755346", "0.5651548", "0.56437546", "0.5634602", "0.56169987", "0.5612094", "0.5609425", "0.560287", "0.5602374", "0.55972165", "0.55955154", "0.5594891", "0.55921775", "0.55887526", "0.55872226", "0.5582846", "0.55791783", "0.55779344", "0.55755264", "0.55620825", "0.5561169", "0.55580586", "0.5555041", "0.5550528", "0.5538494", "0.5535507", "0.5535317", "0.553099", "0.5526658", "0.55247164", "0.55197173", "0.55197173", "0.5518186", "0.5485069", "0.54765344", "0.547342", "0.54645693", "0.5464555", "0.54626185", "0.5459301", "0.54581326", "0.5456584", "0.5440496", "0.5439461", "0.54313076", "0.5430203", "0.54256207", "0.5424641", "0.5422477", "0.54170704", "0.5416888", "0.5415997" ]
0.78912055
0
Estimates the strongest drivers of each neuron using GTE.
def estimate_parents(D, verbose=1, **params):\n    # Parameters\n    CL = params.setdefault('CL', 0.25)\n    k = params.setdefault('k', 2)\n    IFT = params.setdefault('IFT', True)\n    estimate_CL = params.setdefault('estimate_CL', False)\n    num_parents = params.setdefault('num_parents', 3)\n    if verbose > 0:\n        print('Estimating parents using GTE')\n    # Cast D to only two bins for activity level\n    D = np.greater(D, 0)\n    parents = dict()\n    scores = calc_GTE(\n        D.T, CL=CL, k=k, IFT=IFT, estimate_CL=estimate_CL, verbose=verbose)\n    for i in range(scores.shape[0]):\n        p = (-scores[:,i]).argsort()[:num_parents]\n        parents[i] = p\n    return parents, scores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SGD2_5all(self, training_data, test_data=None):\r\n final = []\r\n final.append(self.SGD2_5_1(training_data, 10, 10, 3.0,test_data))\r\n\r\n print(\"first done\")\r\n\r\n self.sizes=[784,30,10]\r\n self.num_layers = len(self.sizes)\r\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\r\n self.weights = [np.random.randn(y, x)*(1/math.sqrt(x)) for x,y in zip(self.sizes[:-1],self.sizes[1:])]\r\n \r\n final.append(self.SGD2_5_1(training_data, 10, 10, 3.0,test_data))\r\n\r\n print(\"second done\")\r\n\r\n self.sizes=[784,30,10]\r\n self.num_layers = len(self.sizes)\r\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\r\n self.weights = [np.random.randn(y, x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\r\n\r\n final.append(self.SGD2_5_2(training_data, 10, 10, 3.0,test_data))\r\n print(\"third done\")\r\n print(sum(int(major(x,a,m,y)) for (x, y),(a,b),(m,n) in zip(final[0],final[1],final[2])))", "def greedy_ensemble( loss_mN, w_km, ensemble_size=10 ):\n \n eLoss_kN = np.dot(w_km, loss_mN)\n \n ensemble_d = [np.argmin( np.mean(loss_mN, 0) )] # start the ensemble with the best model\n \n for _i in range(ensemble_size-1):\n greedy_prob_N = greedy_prob( eLoss_kN, ensemble_d )\n greedy_prob_N[ensemble_d] = 0 # prevents from selecting twice the same\n ensemble_d.append( rand_arg_min( -greedy_prob_N ) )\n \n prob_d = agnostic_bayes_prob( eLoss_kN[:,ensemble_d] )\n return ensemble_d, prob_d", "def find_shortest_network_with_ADH(self, extension):\n\n self.logger.info(\"Solving with the Minimum Average Distance Heuristic:\")\n self.logger.info(\"extension mode activated: %s\" % str(extension))\n self.logger.info(\"Steiner preprocessing start\")\n G, terminal_nodes, link = self.preprocessing_steiner(extension)\n intersect_terminal_nodes = set()\n for tn in terminal_nodes:\n if G.node[tn].get(config.NODE_TYPE_KEY, None) == config.BUILDING_NODE_TYPE:\n intersect_terminal_nodes.add(list(G.neighbors(tn))[0])\n G.remove_node(tn)\n terminal_nodes = intersect_terminal_nodes\n self.logger.info(\"Steiner preprocessing end\")\n\n # Research of the shortest paths\n self.logger.info(\"Heuristic run start\")\n dictpathlength = dict(nx.shortest_path_length(G, weight=config.EDGE_COST_KEY))\n \n # 1. \n forest = []\n for node in terminal_nodes:\n temp = nx.Graph()\n temp.add_node(node)\n forest.append(temp)\n nb_terminals = len(terminal_nodes)\n nb_trees = nb_terminals\n\n # little check\n if nb_trees == 0:\n raise ValueError(\"There isn't building to connect\")\n\n # creation of a dictionnary {node:{tree:distance,...},...} which contains the minimal distance of a node to a tree\n path_length_node_to_tree = {}\n for node in G.nodes():\n path_length_node_to_tree[node] = {}\n for i_tree in range(len(forest)):\n # for the moment, it's easy: there is only one node in each tree\n node_tree = list(forest[i_tree].nodes())[0]\n path_length_node_to_tree[node][i_tree] = (node_tree,dictpathlength[node][node_tree])\n\n # trees which can be used\n set_trees = set(range(len(forest)))\n\n # 2. 
Loop \n while nb_trees > 1:\n function_min = math.inf\n for node in G.nodes():\n # we want to find the two trees with the shortest distance to node\n # first_least = [index_of_tree, the_node_of_the_tree_to_connect, minimal_distance]\n first_least, second_least = [None, None, math.inf], [None, None, math.inf]\n for i_tree in set_trees:\n # minimal distance between node to tree[i_tree]\n v, length = path_length_node_to_tree[node][i_tree]\n # update of the two closest tree from node\n if length < first_least[2]:\n first_least, second_least = [i_tree, v, length], first_least\n elif length < second_least[2]:\n second_least = [i_tree, v, length]\n \n val_function = (second_least[2]+first_least[2])/2\n # update the minimum of f on the nodes, with the corresponding closest trees and the connections\n if val_function < function_min:\n function_min = val_function\n central_node = node\n [i_tree1, connection1, _], [i_tree2, connection2, _] = first_least, second_least\n\n # fusion\n newtree = nx.union(forest[i_tree1], forest[i_tree2])\n if central_node != connection1:\n path1 = nx.shortest_path(G, source=central_node, target=connection1, weight='cost')\n newtree.add_edges_from([(path1[k], path1[k+1]) for k in range(len(path1) - 1)])\n else:\n path1 = [connection1]\n if central_node != connection2:\n path2 = nx.shortest_path(G, source=central_node, target=connection2, weight='cost')\n newtree.add_edges_from([(path2[k], path2[k+1]) for k in range(len(path2) - 1)])\n else:\n path2 = [connection2]\n\n # update of the dictionnary path_length_node_to_tree\n for node in G.nodes(): \n # first, find the minimal distance between node and newtree \n v1, dist1 = path_length_node_to_tree[node][i_tree1]\n v2, dist2 = path_length_node_to_tree[node][i_tree2]\n v3, dist3 = None, math.inf\n for node1 in path1:\n if dictpathlength[node][node1] < dist3:\n v3, dist3 = node1, dictpathlength[node][node1]\n for node2 in path2:\n if dictpathlength[node][node2] < dist3:\n v3, dist3 = node2, dictpathlength[node][node2]\n # then, compute the distance and the node of the tree to connect\n if dist1 == min(dist1,dist2,dist3):\n path_length_node_to_tree[node][len(forest)] = (v1, dist1)\n elif dist2 == min(dist1,dist2,dist3):\n path_length_node_to_tree[node][len(forest)] = (v2, dist2)\n else:\n path_length_node_to_tree[node][len(forest)] = (v3, dist3)\n\n # update of the forest\n forest.append(newtree)\n forest[i_tree1] = None\n forest[i_tree2] = None\n # update of the usable trees\n set_trees.add(len(forest)-1)\n set_trees.remove(i_tree1)\n set_trees.remove(i_tree2)\n nb_trees -= 1\n if nb_trees % 10 == 0:\n self.logger.info('%.1f %% of terminal points connected' %\n ((nb_terminals - nb_trees) / (nb_terminals - 1) * 100))\n\n # 3. 
\n solution_steiner = forest[list(set_trees)[0]]\n for node in solution_steiner.nodes:\n if node not in terminal_nodes and solution_steiner.degree(node) < 2:\n solution_steiner.remove_node(node)\n self.logger.info(\"Heuristic run end\")\n\n self.logger.info(\"Steiner postprocessing start\")\n sol_tree = self.postprocessing_steiner(extension, solution_steiner, link)\n self.logger.info(\"Steiner postprocessing end\")\n\n # return a directed graph with attributes\n length = 0\n edges_to_keep = []\n for u, v, k in self.optimization_graph.edges:\n if sol_tree.has_edge(u, v) or sol_tree.has_edge(v, u):\n edges_to_keep.append((u, v, k))\n length += self.optimization_graph.edges[u, v, k]['cost']\n if self.optimization_graph.node[u][config.NODE_TYPE_KEY] == config.BUILDING_NODE_TYPE \\\n or self.optimization_graph.node[v][config.NODE_TYPE_KEY] == config.BUILDING_NODE_TYPE:\n edges_to_keep.append((u, v, k))\n length += self.optimization_graph.edges[u, v, k]['cost']\n\n solution_graph = nx.MultiGraph(self.optimization_graph.edge_subgraph(edges_to_keep))\n return solution_graph, length/2", "def get_n_best(self):\n pass", "def topGenes(X,Y,feature_name,class_len, feature_len, method, nb_samples, device, net): \n \n input_x = torch.from_numpy(X).float().to(device)\n if method == 'Shap':\n print(\"Running Shap Model... (It may take a long time)\")\n nb_samples = nb_samples\n rand_index = np.random.choice(input_x.shape[0], nb_samples, replace=True)\n background = input_x[rand_index]\n Y_rand = Y[rand_index].reshape(-1,1)\n Y_unique,Y_counts = np.unique(Y_rand,return_counts=True)\n # Create object that can calculate shap values and explain predictions of the model\n explainer = shap.DeepExplainer(net.encoder, background)\n # Calculate Shap values, with dimension (y*N*x) y:number of labels, N number of background samples, x number of features\n shap_values = explainer.shap_values(background)\n if method =='Captum_ig':\n baseline = torch.zeros((X.shape)).to(device)\n ig = IntegratedGradients(net.encoder)\n attributions, delta = ig.attribute(input_x, baseline, target=0, return_convergence_delta=True)\n if method =='Captum_dl':\n baseline = torch.zeros((X.shape)).to(device)\n dl = DeepLift(net.encoder)\n attributions, delta = dl.attribute(input_x, baseline, target=0, return_convergence_delta=True) \n if method =='Captum_gs':\n baseline_dist = (torch.randn((X.shape))* 0.001).to(device)\n gs = GradientShap(net.encoder)\n attributions, delta = gs.attribute(input_x, stdevs=0.09, n_samples=10, \\\n baselines=baseline_dist, target=0, return_convergence_delta=True) \n \n # Use the weight differences to do rank\n if class_len ==2:\n class_len = 1\n feature_rank = np.empty((feature_len,2*class_len), dtype=object) #save ranked features and weights\n # one class vs others\n for class_index in range(class_len):\n attributions_mean_list =[]\n Y_i = Y.copy()\n Y_i[ Y_i != class_index ] = class_index+1 # change to 2 class\n Y_unique,Y_counts = np.unique(Y_i,return_counts=True)\n # repeat 2 times\n for i in Y_unique:\n if method =='Shap':\n attributions_i = torch.from_numpy(shap_values[i]).float().to(device)\n else:\n attributions_i = attributions[Y_i==i] # find all X of each class\n attributions_mean = torch.mean(attributions_i, dim =0) \n attributions_mean_list.append(attributions_mean)\n # class_weight differences \n class_weight = attributions_mean_list[0] - attributions_mean_list[1] \n attributions_weight, index_sorted = torch.sort(class_weight, descending= True)\n attributions_name = np.array([feature_name[x] for x in 
index_sorted])\n attributions_weight = attributions_weight.detach().cpu()\n feature_rank[:,class_index*2 ] = attributions_name\n feature_rank[:,class_index*2+1 ] = attributions_weight \n \n # Save results as DAtaFrame \n mat_head = np.array(['topGenes' if x%2==0 else 'Weights' for x in range(class_len*2)])\n mat_head = mat_head.reshape(1,-1)\n mat = np.r_[mat_head ,feature_rank ]\n mat[1:, 1] = mat[1:,1]/float(mat[1,1])\n columns = ['Class'+str(int(x/2)+1) for x in range(class_len*2)] \n ind_df = ['Attributes']+ [str(x) for x in range(feature_len)]\n res = pd.DataFrame(mat,index=ind_df,columns=columns)\n return res", "def _hg_model_fn(features, labels, mode, params):\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n weight_decay = params.weight_decay\n momentum = params.momentum\n decay_factor = params.decay_factor\n decay_step = params.decay_step\n init_learning_rate = params.init_learning_rate\n num_stacks = params.num_stacks\n num_joints = params.num_joints\n\n tower_features = features\n if mode == tf.estimator.ModeKeys.PREDICT:\n if num_gpus < 1:\n tower_labels = [None]\n else:\n tower_labels = [None for i in range(num_gpus)]\n else:\n tower_labels = labels\n\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = 'channels_last'\n else:\n data_format = 'channels_first'\n\n if num_gpus == 0:\n num_devices = 1\n device_type = 'cpu'\n else:\n num_devices = num_gpus\n device_type = 'gpu'\n\n for i in range(num_devices):\n worker_device = '/{}:{}'.format(device_type, i)\n if variable_strategy == 'CPU':\n device_setter = utils.local_device_setter(\n worker_device=worker_device)\n elif variable_strategy == 'GPU':\n device_setter = utils.local_device_setter(\n ps_device_type='gpu',\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn))\n if mode == tf.estimator.ModeKeys.TRAIN:\n batch_size = params.train_batch_size / num_devices\n else:\n batch_size = params.eval_batch_size / num_devices\n\n with tf.variable_scope('hg', reuse=bool(i != 0)):\n with tf.name_scope('tower_%d' % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n mode, weight_decay, tower_features[i][0], tower_labels[i],\n data_format, params.batch_norm_decay,\n params.batch_norm_epsilon, params.num_stacks, params.num_out, params.n_low, params.num_joints, batch_size,params.seq_length)\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. 
Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,\n name_scope)\n\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope('gradient_averaging'):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'\n with tf.device(consolidation_device):\n\n learning_rate = tf.train.exponential_decay(init_learning_rate, tf.train.get_global_step(), decay_step, decay_factor, staircase=True, name= 'learning_rate')\n\n loss = tf.reduce_mean(tower_losses, name='loss')\n\n examples_sec_hook = utils.ExamplesPerSecondHook(\n params.train_batch_size, every_n_steps=10)\n\n tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n train_hooks = [logging_hook, examples_sec_hook]\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers)\n sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step())\n ]\n \n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n if mode==tf.estimator.ModeKeys.EVAL:\n hm = predictions['heatmaps']\n stacked_labels = tf.concat(labels[0][0][0], axis=0)\n \n gt_labels = tf.transpose(stacked_labels,[1,0,3,4,2])\n\n joint_accur = []\n for j in range(params.seq_length):\n for i in range(params.num_joints):\n joint_accur.append(_pck_hm(hm[j,:,-1, :, :,i], gt_labels[j,:, :, :, i], params.eval_batch_size/num_devices))\n accuracy = tf.stack(joint_accur)\n metrics = {'Mean Pixel Error': tf.metrics.mean(accuracy)}\n tf.logging.info('Accuracy op computed')\n else:\n metrics = None\n \n else:\n train_op = None\n loss = None\n train_hooks = None\n metrics = None\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n \n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics)", "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = 
TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")", "def train_linear_ensemble(x, y, alpha, max_iter, n_ensembles):\n # x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n x_train, y_train = x, y\n ensemble_models = []\n for i in range(n_ensembles):\n samples = sample_without_replacement(n_population=x_train.shape[0], n_samples=(x_train.shape[0]/5))\n x_seg_train = pd.DataFrame()\n y_seg_train = pd.Series()\n for sample in samples:\n x_seg_train = pd.concat([x_seg_train, x_train.iloc[[sample]]])\n y_seg_train = pd.concat([y_seg_train, y_train.iloc[[sample]]])\n\n model: Ridge = Ridge(alpha=alpha, normalize=True, max_iter=max_iter).fit(x_seg_train, y_seg_train)\n print(model.score(x_seg_train, y_seg_train))\n # print(model.score(x_test, y_test))\n ensemble_models.append(model)\n\n return ensemble_models", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n 
Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def get_model_fn(num_gpus, variable_strategy, num_workers):\n\n def _hg_model_fn(features, labels, mode, params):\n \"\"\" HG model body.\n\n Support single host, one or more GPU training. Parameter distribution can\n be either one of the following scheme.\n 1. CPU is the parameter server and manages gradient updates.\n 2. 
Parameters are distributed evenly across all GPUs, and the first GPU\n manages gradient updates.\n\n Args:\n features: a list of tensors, one for each tower\n labels: a list of tensors, one for each tower\n mode: ModeKeys.TRAIN or EVAL\n params: Hyperparameters suitable for tuning\n Returns:\n A EstimatorSpec object.\n \"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n weight_decay = params.weight_decay\n momentum = params.momentum\n decay_factor = params.decay_factor\n decay_step = params.decay_step\n init_learning_rate = params.init_learning_rate\n num_stacks = params.num_stacks\n num_joints = params.num_joints\n\n tower_features = features\n if mode == tf.estimator.ModeKeys.PREDICT:\n if num_gpus < 1:\n tower_labels = [None]\n else:\n tower_labels = [None for i in range(num_gpus)]\n else:\n tower_labels = labels\n\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = 'channels_last'\n else:\n data_format = 'channels_first'\n\n if num_gpus == 0:\n num_devices = 1\n device_type = 'cpu'\n else:\n num_devices = num_gpus\n device_type = 'gpu'\n\n for i in range(num_devices):\n worker_device = '/{}:{}'.format(device_type, i)\n if variable_strategy == 'CPU':\n device_setter = utils.local_device_setter(\n worker_device=worker_device)\n elif variable_strategy == 'GPU':\n device_setter = utils.local_device_setter(\n ps_device_type='gpu',\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn))\n if mode == tf.estimator.ModeKeys.TRAIN:\n batch_size = params.train_batch_size / num_devices\n else:\n batch_size = params.eval_batch_size / num_devices\n\n with tf.variable_scope('hg', reuse=bool(i != 0)):\n with tf.name_scope('tower_%d' % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n mode, weight_decay, tower_features[i][0], tower_labels[i],\n data_format, params.batch_norm_decay,\n params.batch_norm_epsilon, params.num_stacks, params.num_out, params.n_low, params.num_joints, batch_size,params.seq_length)\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,\n name_scope)\n\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope('gradient_averaging'):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(tf.add_n(grads), 1. 
/ len(grads))\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'\n with tf.device(consolidation_device):\n\n learning_rate = tf.train.exponential_decay(init_learning_rate, tf.train.get_global_step(), decay_step, decay_factor, staircase=True, name= 'learning_rate')\n\n loss = tf.reduce_mean(tower_losses, name='loss')\n\n examples_sec_hook = utils.ExamplesPerSecondHook(\n params.train_batch_size, every_n_steps=10)\n\n tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n train_hooks = [logging_hook, examples_sec_hook]\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers)\n sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step())\n ]\n \n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n if mode==tf.estimator.ModeKeys.EVAL:\n hm = predictions['heatmaps']\n stacked_labels = tf.concat(labels[0][0][0], axis=0)\n \n gt_labels = tf.transpose(stacked_labels,[1,0,3,4,2])\n\n joint_accur = []\n for j in range(params.seq_length):\n for i in range(params.num_joints):\n joint_accur.append(_pck_hm(hm[j,:,-1, :, :,i], gt_labels[j,:, :, :, i], params.eval_batch_size/num_devices))\n accuracy = tf.stack(joint_accur)\n metrics = {'Mean Pixel Error': tf.metrics.mean(accuracy)}\n tf.logging.info('Accuracy op computed')\n else:\n metrics = None\n \n else:\n train_op = None\n loss = None\n train_hooks = None\n metrics = None\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n \n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics)\n\n return _hg_model_fn", "def run_evolutionary_generations(self):\n \n # Evolve the generation.\n for i in range(self.generations):\n logging.info(\"***Doing generation %d of %d***\" %\n (i + 1, self.generations))\n \n self.train_networks(self.networks)\n \n if self.is_classification:\n average_accuracy, highest_accuracy, lowest_accuracy, highest_scoring_network = self.get_accuracy_stats(self.networks) \n \n if highest_scoring_network is not None:\n highest_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_acc%f\" % (i, highest_accuracy)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_accuracy * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_accuracy * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_accuracy * 100))\n logging.info('-'*80)\n else:\n average_loss, highest_loss, lowest_loss, best_scoring_network = self.get_loss_stats(self.networks) \n if best_scoring_network is not None:\n best_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_loss%f\" % (i, lowest_loss)))\n \n 
logging.info(\"Generation average: %.2f%%\" % (average_loss * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_loss * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_loss * 100))\n logging.info('-'*80)\n # Evolve, except on the last iteration.\n if i != self.generations - 1:\n self.networks = self.optimizer.evolve(self.networks)\n \n self.save_network_objects(self.networks)\n \n if self.is_classification:\n self.networks = sorted(self.networks, key=lambda x: x.accuracy, reverse=True)\n else:\n self.networks = sorted(self.networks, key=lambda x: x.loss, reverse=False)\n \n self.print_networks(self.networks[:5])\n \n self.save_trained_network_models(self.dataset, self.networks[:5])", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())", "def TST_LCE_D(S,N1,N_per,alpha,discriminator,device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = discriminator(S)\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT", "def TST_C2ST_D(S,N1,N_per,alpha,discriminator,device,dtype):\r\n 
np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = discriminator(S)\r\n pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(pred_C2ST[:N1].type(torch.FloatTensor).mean() - pred_C2ST[N1:].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n STAT_vector[r] = abs(pred_C2ST[ind_X].type(torch.FloatTensor).mean() - pred_C2ST[ind_Y].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT", "def greedy_prob( eLoss_kN, ensemble_d ):\n \n N = eLoss_kN.shape[1]\n\n prob_N = np.zeros(N)\n for i in range(N):\n \n new_ensemble_D = np.append(ensemble_d, i ) # add model i to the ensemble\n prob_D = agnostic_bayes_prob(eLoss_kN[:,new_ensemble_D] ) \n prob_N[i] = prob_D[-1] # collect the probability of the recently added model\n \n return prob_N", "def Network_CV_Optimise(networklist, taurange, glaciername='Glacier'):\n bestfitarr = []\n for j, d in enumerate(networklist): \n optimal_ty, optimal_t0 = CV_Optimise(glaciername+str(j), d, taurange)\n d['Best tau_y'] = optimal_ty\n d['Best tau_0'] = optimal_t0\n bestfitarr.append((optimal_ty, optimal_t0))\n \n return bestfitarr", "def lazy_greedy_max(self, budget):\r\n\r\n classes, no_elements = torch.unique(self.y_trn, return_counts=True)\r\n len_unique_elements = no_elements.shape[0]\r\n per_class_bud = int(budget / len(classes))\r\n final_per_class_bud = []\r\n _, sorted_indices = torch.sort(no_elements, descending = True)\r\n\r\n if self.selection_type == 'PerClass':\r\n \r\n total_idxs = 0\r\n for n_element in no_elements:\r\n final_per_class_bud.append(min(per_class_bud, torch.IntTensor.item(n_element)))\r\n total_idxs += min(per_class_bud, torch.IntTensor.item(n_element))\r\n \r\n if total_idxs < budget:\r\n bud_difference = budget - total_idxs\r\n for i in range(len_unique_elements):\r\n available_idxs = torch.IntTensor.item(no_elements[sorted_indices[i]])-per_class_bud \r\n final_per_class_bud[sorted_indices[i]] += min(bud_difference, available_idxs)\r\n total_idxs += min(bud_difference, available_idxs)\r\n bud_difference = budget - total_idxs\r\n if bud_difference == 0:\r\n break\r\n\r\n total_greedy_list = []\r\n for i in range(len_unique_elements):\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n \r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 
'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=final_per_class_bud[i])\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn[idxs].numpy())\r\n greedyList = self.get_index(self.x_trn[idxs].numpy(), x_sub)\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n greedyList = list(np.argmax(sim_sub, axis=1))\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n elif self.selection_type == 'Supervised':\r\n \r\n \r\n if self.submod == 'feature_based':\r\n \r\n class_map = {}\r\n for i in range(len_unique_elements):\r\n class_map[torch.IntTensor.item(classes[i])] = i #Mapping classes from 0 to n\r\n \r\n sparse_data = torch.zeros([self.x_trn.shape[0], self.x_trn.shape[1]*len_unique_elements])\r\n for i in range(self.x_trn.shape[0]):\r\n \r\n start_col = class_map[torch.IntTensor.item(self.y_trn[i])]*self.x_trn.shape[1]\r\n end_col = start_col+self.x_trn.shape[1]\r\n sparse_data[i, start_col:end_col] = self.x_trn[i, :]\r\n\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n x_sub = fl.fit_transform(sparse_data.numpy())\r\n total_greedy_list = self.get_index(sparse_data.numpy(), x_sub)\r\n\r\n else:\r\n for i in range(len(classes)):\r\n \r\n if i == 0:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = idxs.repeat_interleave(N)\r\n col = idxs.repeat(N)\r\n data = self.dist_mat.cpu().numpy().flatten()\r\n else:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\r\n col = torch.cat((col, idxs.repeat(N)), dim=0)\r\n data = np.concatenate([data, self.dist_mat.cpu().numpy().flatten()], axis=0)\r\n \r\n \r\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\r\n #self.dist_mat = sparse_simmat\r\n\r\n if self.submod == 'facility_location':\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n sim_sub = fl.fit_transform(sparse_simmat)\r\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\r\n\r\n\r\n if self.selection_type == 'Full':\r\n \r\n\r\n total_greedy_list = []\r\n idx_end = self.x_trn.shape[0] - 1\r\n idxs = torch.linspace(0, idx_end, self.x_trn.shape[0]).long()\r\n\r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = 
apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn.numpy())\r\n total_greedy_list = self.get_index(self.x_trn.numpy(), x_sub)\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n total_greedy_list = list(np.argmax(sim_sub, axis=1))\r\n\r\n return total_greedy_list", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 
0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, route_way_time", "def eval_genome_eer(g, conf, batch, backprop=False, use_gate=True):\n\n # inputs: batch_size x t x bins\n # outputs: batch_size\n inputs, targets = batch\n # inputs: t x batch_size x bins\n inputs = inputs.transpose(0, 1)\n\n net = neat_local.nn.RecurrentNet.create(g, conf, device=\"cpu\", dtype=torch.float32)\n assert not backprop\n net.reset(len(targets))\n\n contribution = torch.zeros(len(targets))\n norm = torch.zeros(len(targets))\n for input_t in inputs:\n # input_t: batch_size x bins\n\n xo = net.activate(input_t) # batch_size x 2\n score = xo[:, 1]\n confidence = xo[:, 0] if use_gate else torch.ones_like(score)\n contribution += score * confidence # batch_size\n norm += confidence # batch_size\n\n jitter = 1e-8\n prediction = contribution / (norm + jitter) # batch_size\n\n target_scores = prediction[targets == 1].numpy() # select with mask when target == 1\n non_target_scores = prediction[targets == 0].numpy() # select with mask when target == 0\n\n pmiss, pfa = rocch(target_scores, non_target_scores)\n eer = rocch2eer(pmiss, pfa)\n\n return 2 * (.5 - eer)", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def test_optimalagentfinder () :\n def valNetwork (s) : \n s = s.float()\n v = reduce(model.withReluDropout, model.v[:-1], s)\n v = model.v[-1](v)\n return v\n acrobotBases = acrobotRewardBases(np.pi / 8, np.pi / 8)\n fn = random.sample(acrobotBases, k=1).pop()\n agent = findOptimalAgent(fn)\n model = agent.model\n toExternal = lambda x, y : toExternalStateRep([x, y, 0, 0])\n valFn = reduce(compose, [float, valNetwork, torch.tensor, toExternal])\n RFn = compose(fn, toExternal)\n xRange = np.arange(-np.pi, np.pi, 0.1)\n yRange = np.arange(-np.pi, np.pi, 0.1)\n plotFunction(RFn, xRange, yRange, 'theta1', 'theta2', 'R')\n plotFunction(valFn, xRange, yRange, 'theta1', 'theta2', 'V')", "def TST_ME_DK_per(X, Y, T, X_org, Y_org, T_org, alpha, sigma, sigma0, epsilon):\r\n N_per = 100\r\n J = T.shape[0]\r\n s = 
compute_ME_stat(X, Y, T, X_org, Y_org, T_org, sigma, sigma0, epsilon)\r\n Fea = torch.cat([X.cpu(), Y.cpu()], 0).cuda()\r\n Fea_org = torch.cat([X_org.cpu(), Y_org.cpu()], 0).cuda()\r\n N1 = X.shape[0]\r\n N = Fea.shape[0]\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = compute_ME_stat(Fea[ind_X,:], Fea[ind_Y,:], T, Fea_org[ind_X,:], Fea_org[ind_Y,:], T_org, sigma, sigma0, epsilon)\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n h = 0\r\n if s.item() > threshold:\r\n h = 1\r\n return h, threshold, s", "def evolve(self, env, num_generations, num_episodes, num_frames):\n for gen in range(num_generations):\n\n if Trainer.VERBOSE:\n print(\"Generation:\", gen)\n\n # Generate new root Teams\n self.generation()\n\n # Evaluate current agents\n self.evaluation(env, num_episodes, num_frames)\n\n # Perform selection\n self.selection()\n\n # Return to top-performing agent. Typically not used, but nice to have\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n return ranked_agents[0]", "def start_neuroevolution(x, y, x_test, y_test):\n\n connections = [(0, INPUT0, OUTPUT0), (1, INPUT1, OUTPUT0), (2, INPUT0, OUTPUT1), (3, INPUT1, OUTPUT1)]\n genotypes = [{0: True, 1: True, 2: True, 3: True} for d in xrange(5)]\n\n for its in xrange(0,5):\n print \"iteration\", its\n\n fitnesses = []\n # test networks\n for i in xrange(0,len(genotypes)):\n fitnesses.append(eval_fitness(connections, genotypes[i], x, y, x_test, y_test, run_id=str(its) + \"/\" + str(i)))\n\n # get indices of sorted list\n fitnesses_sorted_indices = [i[0] for i in reversed(sorted(enumerate(fitnesses), key=lambda x: x[1]))]\n\n print \"connections:\\n\"\n print connections\n for ra in xrange(0,len(fitnesses_sorted_indices)):\n print fitnesses[fitnesses_sorted_indices[ra]], genotypes[fitnesses_sorted_indices[ra]]\n\n # run evolutions\n # todo: fiddle with parameters, include size of network in fitness?\n new_gen = []\n # copy five best survivors already\n m = 5\n if m > len(fitnesses):\n m = len(fitnesses)\n\n for i in xrange(0,m):\n print \"adding:\", fitnesses[fitnesses_sorted_indices[i]], genotypes[fitnesses_sorted_indices[i]]\n new_gen.append(genotypes[fitnesses_sorted_indices[i]])\n\n for i in xrange(0,len(fitnesses_sorted_indices)):\n fi = fitnesses_sorted_indices[i]\n r = np.random.uniform()\n # select the best for mutation and breeding, kill of worst.\n if r <= 0.2:\n # mutate\n connections, gen = add_connection(connections, genotypes[i])\n new_gen.append(gen)\n r = np.random.uniform()\n if r <= 0.5:\n connections, gen = add_node(connections, genotypes[i])\n new_gen.append(gen)\n\n r = np.random.uniform()\n if r <= 0.1:\n # select random for breeding\n r = np.random.randint(0,len(fitnesses))\n r2 = np.random.randint(0,len(fitnesses) - 1)\n if r2 >= r:\n r2 +=1\n gen = crossover(connections, genotypes[r], fitnesses[r], genotypes[r2], fitnesses[r2])\n new_gen.append(gen)\n new_gen.append(genotypes[fi])\n # stop if we have 5 candidates\n if len(new_gen) > 10:\n break\n genotypes = new_gen", "def decide_next_query(self):\n for gp in self.gps:\n build_gp_posterior(gp)\n # Find the best mean values for each gp.\n best_f, best_pt, best_gain = None, None, float('-inf')\n queries = self._get_queried_pts()\n for f_idx, f_name in enumerate(self.f_names):\n gp = self.gps[f_idx]\n f_qs = 
queries[f_name]\n # Assemble points to draw sample from.\n low, high = zip(*self.domains[f_idx])\n rand_pts = np.random.uniform(low, high,\n (self.options.max_opt_evals, len(low)))\n samp_pts = np.vstack([f_qs, rand_pts])\n samp_vals = gp.draw_sample(samp_pts=samp_pts).ravel()\n max_prev = np.max(samp_vals[:len(f_qs)])\n best_new_idx = np.argmax(samp_vals[len(f_qs):]) + len(f_qs)\n gain = samp_vals[best_new_idx] - max_prev\n if gain > best_gain:\n best_f = f_idx\n best_pt = samp_pts[best_new_idx]\n best_gain = gain\n return best_f, best_pt", "def sgd_model(params):\n\n \n if (params['random']):\n params['loss'] = random.choice(['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron', 'squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'])\n params['penalty'] = random.choice(['none', 'l2', 'l1', 'elasticnet'])\n params['alpha'] = random.choice([0.001, 0.0001, 0.00001])\n model = SGDClassifier(\n loss=params['loss'],\n penalty=params['penalty'],\n alpha=params['alpha']\n )\n\n return model", "def genresnet50(**kwargs):\n return EresNet(resnetblocks.EresNetBottleneck, [3, 4, 6, 3], **kwargs)", "def eg_sk():\n\n rxs = []\n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n h = []\n i = []\n j = []\n\n for _ in range(1000):\n a.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n b.append(utils.gaussian(10.1, 1))\n\n for _ in range(1000):\n c.append(utils.gaussian(20, 1))\n\n for _ in range(1000):\n d.append(utils.gaussian(30, 1))\n\n for _ in range(1000):\n e.append(utils.gaussian(30.1, 1))\n\n for _ in range(1000):\n f.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n g.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n h.append(utils.gaussian(40, 1))\n\n for _ in range(1000):\n i.append(utils.gaussian(40, 3))\n\n for _ in range(1000):\n j.append(utils.gaussian(10, 1))\n\n for k, v in enumerate([a, b, c, d, e, f, g, h, i, j]):\n rxs.append(creation.RX(v, \"rx{}\".format(k)))\n\n for rx in stats.tiles(stats.scottKnot(rxs)):\n print(\"\", rx[\"rank\"], rx[\"name\"], rx[\"show\"], sep=\"\\t\")", "def driver_setup(prob):\n\n if Rt.type == \"OPTIM\":\n # TBD : Genetic algorithm\n # if len(Rt.objective) > 1 and False:\n # log.info(\"\"\"More than 1 objective function, the driver will\n # automatically be set to NSGA2\"\"\")\n # prob.driver = om.pyOptSparseDriver() # multifunc driver : NSGA2\n # prob.driver.options['optimizer'] = 'NSGA2'\n # prob.driver.opt_settings['PopSize'] = 7\n # prob.driver.opt_settings['maxGen'] = Rt.max_iter\n # else:\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options[\"optimizer\"] = Rt.driver\n prob.driver.options[\"maxiter\"] = Rt.max_iter\n prob.driver.options[\"tol\"] = Rt.tol\n prob.driver.options[\"disp\"] = True\n elif Rt.type == \"DOE\":\n if Rt.doedriver == \"Uniform\":\n driver_type = om.UniformGenerator(num_samples=Rt.samplesnb)\n elif Rt.doedriver == \"LatinHypercube\":\n driver_type = om.LatinHypercubeGenerator(samples=Rt.samplesnb)\n elif Rt.doedriver == \"FullFactorial\":\n driver_type = om.FullFactorialGenerator(levels=Rt.samplesnb)\n elif Rt.doedriver == \"CSVGenerated\":\n file = gen_doe_csv(Rt.user_config)\n driver_type = om.CSVGenerator(file)\n prob.driver = om.DOEDriver(driver_type)\n prob.driver.options[\"run_parallel\"] = True\n prob.driver.options[\"procs_per_model\"] = 1\n else:\n log.error(\"Type of optimisation not recognize!!!\")\n\n # Attaching a recorder and a diagramm visualizer ##\n prob.driver.recording_options[\"record_inputs\"] = True\n 
prob.driver.add_recorder(om.SqliteRecorder(str(Rt.optim_dir) + \"/circuit.sqlite\"))\n prob.driver.add_recorder(om.SqliteRecorder(str(Rt.optim_dir) + \"/Driver_recorder.sql\"))", "def run():\n\n # Build list of stations\n stations = build_station_list()\n list_of_rivers_numbers=rivers_by_station_number(stations, 9)\n print(\"Rivers with greatest number of stations: {}\".format(list_of_rivers_numbers))", "def simulation(G, # graph object\n pos = None, # positions of nodes\n n = 5, # number of simulation steps\n \n # wrapped args for simulation_step function\n kernel = 'weights', # simulation kernel\n custom_kernel = None, # custom simulation kernel\n WERE_multiplier = 10, # multiplier for WERE kernel\n oblivion = False, # enable information oblivion\n engagement_enforcement = 1.01,\n draw = False, # draw graph\n show_attr = False): # show attributes \n \n #=======================================#\n # append nodes data from 0 step to list #\n #=======================================#\n \n graph_list = []\n graph_list.append(copy.deepcopy(list(G.nodes.data() ) ) )\n \n\n #===================#\n # Run n simulations #\n #===================#\n \n for i in range(n):\n dp.simulation_step(G = G, \n pos = pos, \n \n kernel = kernel,\n custom_kernel = custom_kernel,\n WERE_multiplier = WERE_multiplier, \n oblivion = oblivion, \n engagement_enforcement = engagement_enforcement,\n draw = draw, \n show_attr = show_attr)\n\n # save nodes data to to list\n graph_list.append(copy.deepcopy(list(G.nodes.data() ) ) )\n \n \n #======================================================#\n # Count aware agents before and after simulation steps #\n #======================================================#\n \n # Check number of aware agents in 0 step\n #global aware_first\n aware_first = []\n for i in range(len(graph_list[0])):\n aware_first.append(graph_list[0][i][1]['state'])\n aware_first_c = aware_first.count('aware')\n \n # graph_list[0][1][1]['state']\n \n # Check number of aware agents in the last step\n #global aware_last\n aware_last = []\n graph_list_len = len(graph_list) - 1\n for i in range(len(graph_list[0])):\n aware_last.append(graph_list[graph_list_len][i][1]['state']) # n is the last sim\n aware_last_c = aware_last.count('aware')\n \n #graph_list[5][0][1]['state']\n \n #=================================#\n # diffusion performance measuring #\n #=================================#\n \n # equation for diffusion performance measuring\n avg_aware_inc_per_step = (aware_last_c - aware_first_c) / n\n \n # show graph statistics\n return graph_list, avg_aware_inc_per_step", "def evo_alg(evo_type,env,n_hidden_neurons):\n # initialise parameters for NEAT1\n ngens = 10\n population = 20\n best = 10\n\n # initialise parameters for NEAT2\n if evo_type=='NEAT2':\n best = False\n\n pop_data = {}\n\n # initialize population randomly\n for ind in range(population):\n # initialise random weights\n\n weights = np.random.uniform(-1,1,size=(n_hidden_neurons+20*n_hidden_neurons+5+n_hidden_neurons*5))\n\n fitness = run_play(weights)\n pop_data[ind] = (fitness, weights)\n\n # perform evolutionary algorithm for all generations\n for gen in range(ngens):\n\n print(f'RUN: {gen+1}')\n\n # sort by fitness\n pop_data={k: v for k, v in sorted(pop_data.items(), key=lambda item: item[1][0], reverse=True)}\n\n # perform cross-over on best individuals\n all_weights = cross_rand(pop_data,best,population)\n\n # overwrite old population data\n pop_data = {}\n\n for ind in range(population):\n weights = mutate(all_weights[ind],gen)\n\n 
fitness = run_play(weights)\n pop_data[ind] = (fitness,weights)", "def visit_all_possibilities(self, first_station, track, grid):\n # loops over connections of station\n for connection in first_station.connections:\n # keeps adding untill the max length of a track is reached\n if track.add_station(grid, self.stations[connection].name):\n # calculates the quality of adding the station and remembers it if it is the best score yet\n if grid.get_quality() > self.best_score:\n self.best_score = grid.get_quality()\n self.grid = copy.deepcopy(grid)\n print(f\"new best score: {self.best_score}:\\n{self.grid}\\n\\n\")\n\n # repeat untill there are no more configurations left\n self.visit_all_possibilities(self.stations[connection], track, grid)\n track.remove_last_station()", "def TrainSGTRidge(As, bs, num_tols = 50, lam = 1e-5, normalize = 2):\n\n np.random.seed(0) # for consistancy\n\n m = len(As)\n n,D = As[0].shape\n \n # Normalize\n if normalize != 0:\n\n # get norm of each column\n candidate_norms = np.zeros(D)\n for i in range(D):\n candidate_norms[i] = Norm(np.vstack(A[:,i] for A in As), normalize)\n\n norm_bs = [m*Norm(b, normalize) for b in bs]\n\n # normalize \n for i in range(m):\n As[i] = As[i].dot(np.diag(candidate_norms**-1))\n bs[i] = bs[i]/norm_bs[i]\n \n # Get array of tols to check\n x_ridge = np.hstack([Ridge(A,b,lam) for (A,b) in zip(As, bs)])\n max_tol = np.max([Norm(x_ridge[j,:]) for j in range(x_ridge.shape[0])])\n min_tol = np.min([Norm(x_ridge[j,:]) for j in range(x_ridge.shape[0])])\n Tol = [0]+[np.exp(alpha) for alpha in np.linspace(np.log(min_tol), np.log(max_tol), num_tols)][:-1]\n\n # Test each value of tol to find the best\n X = []\n Losses = []\n\n for tol in Tol:\n x = SGTRidge(As,bs,tol)\n X.append(x)\n Losses.append(PDE_FIND_Loss(As, bs, x))\n\n if normalize != 0:\n for x in X:\n for i in range(D):\n for j in range(m):\n x[i,j] = x[i,j]/candidate_norms[i]*norm_bs[j]\n for i in range(m):\n As[i] = As[i].dot(np.diag(candidate_norms))\n bs[i] = bs[i]*norm_bs[i]\n \n return X,Tol,Losses", "def sgd(iterations):\n for iteration in range(0,iterations):\n error = []\n for user_id in range(0,latent_user_preferences.shape[0]):\n for item_id in range(0,latent_item_features.shape[0]):\n rating = user_ratings[user_id][item_id]\n if rating != 99:\n err = train(user_id, item_id, rating)\n error.append(err)\n mse = (np.array(error) ** 2).mean() \n if(iteration%1 == 0):#000 == 0 ):\n print(mse)\n return error", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i 
in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def run(config_file,vehicle_ego, SUMO):\r\n # Load configuration.\r\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\r\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\r\n config_file)\r\n\r\n # Create the population, which is the top-level object for a NEAT run.\r\n p = Population(config)\r\n\r\n # Add a stdout reporter to show progress in the terminal.\r\n p.add_reporter(neat.StdOutReporter(True))\r\n stats = neat.StatisticsReporter()\r\n p.add_reporter(stats)\r\n\r\n # Run for up to 300 generations.\r\n winner = p.run(eval_genomes, trafic, SUMO, number_episodes)\r\n# p.stop()\r\n\r\n # Display the winning genome.\r\n print('\\nBest genome:\\n{!s}'.format(winner))\r\n\r\n # Show output of the most fit genome against training data.\r\n print('\\nOutput:')\r\n# winner_net = neat.nn.FeedForwardNetwork.create(winner, config)\r\n# for xi, xo in zip(xor_inputs, xor_outputs):\r\n# output = winner_net.activate(xi)\r\n# print(\r\n# \"input {!r}, expected output {!r}, got {!r}\".format(xi, xo, output)\r\n# )\r\n\r\n if visualize is not None:\r\n node_names = {-1: 'distance', -2: 'v_ego',-3:'v_prec', -4:'v_allowed', 0: 'a_set'}\r\n visualize.draw_net(config, winner, True, node_names=node_names)\r\n visualize.plot_stats(stats, ylog=False, view=True)\r\n visualize.plot_species(stats, view=True)\r\n return winner", "def greedy_selector(self):\n r_k = 0 \n best_route = []\n 
cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path", "def get_winners(self):\n\n if self.optimal is not None:\n return self.optimal\n clean_proposals = self.cleaner.create_scenarios(self.proposals)\n self.optimal = self.optimizer.optimize(clean_proposals)\n return self.optimal", "def greedy(self):\n # for each node, find the incoming link with the highest score.\n max_scores = {}\n max_sources = {}\n for source, target in self.iteredges():\n score = self.get_score(source, target)\n max_score = max_scores.get(target)\n if max_score is None or score > max_score:\n max_scores[target] = score\n max_sources[target] = source\n # then build a graph out of just these links.\n succs = dict((n, []) for n in self)\n for target, source in max_sources.items():\n succs[source].append(target)\n return Digraph(succs, self.get_score, self.get_label)", "def run(self, verbose=False):\n\n cost = {}; cost[\"best\"] = []; cost[\"mean\"] = []\n for i in range(self.max_iters):\n\n # prints out information at current cycle\n if verbose:\n print(\"Iteration: {}\".format(i),\n \"Fitness: {}\".format(self.forest[0][0]))\n\n # reproduction phase\n self.reproduce()\n\n # seed dispersal phase\n self.seedlings = []\n for tree in self.population:\n self.disperse(tree[1])\n tree[1].year += 1\n\n # selection phase\n self.select()\n\n # decays exploration parameters\n if (self.epsilon > 0):\n self.epsilon -= self.epsilon_decay\n\n # stores statistics and updates counter of iterations\n cost[\"best\"].append(self.population[0][0])\n cost[\"mean\"].append( sum( [ tree[0] for tree in self.population ] )\\\n / len(self.population) )\n self.iteration += 1\n\n return cost", "def run_grid_search():\n\n best_score = 0\n best_learning_rate = 0\n best_discount_rate = 0\n best_initial_q_hat = 0\n trial_results = []\n number_of_trials = 30\n # TODO These ought to be done with numpy.arange but I don't have that package installed at the moment\n for learning_rate_raw in range(5, 50, 5):\n for discount_rate_raw in range(5, 20, 5):\n for initial_q_hat in range(0, 10, 1):\n learning_rate = learning_rate_raw * 0.01\n discount_rate = discount_rate_raw * 0.05\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=number_of_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n score = a.get_score()\n if score > best_score:\n best_score = score\n best_learning_rate = learning_rate\n best_discount_rate = discount_rate\n best_initial_q_hat = initial_q_hat\n trial_results.append((learning_rate, discount_rate, initial_q_hat, score, a.get_proportion_of_states_visited(), len(a.get_failed_trials())/ 
float(number_of_trials)))\n print \"Gridsearch finished, best learning rate: %.2f, best discount rate: %.2f, best initial q hat %i\" % (best_learning_rate, best_discount_rate, best_initial_q_hat)\n\n with open('gridsearch_results.csv', 'wb') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow(('learning rate', 'discount factor', 'initial q-hat value', 'score', 'states visited', 'failed trials'))\n for result in trial_results:\n spamwriter.writerow(result)", "def grid_search(self):\n\t\tchoice_apply_BN = [False] if self.debug else [False] # True, False\n\t\tchoice_apply_RD = [False] if self.debug else [False] # True, False\n\n\t\tchoice_layers = [3] if self.debug else [3] # 1, 2, 3, 4\n\t\tchoice_hd_hn_af = ['S'] if self.debug else ['R'] # 'R6' | 'RK' | 'S' activation function w.r.t. head hidden layers\n\t\tchoice_tl_af = ['S'] if self.debug else ['R'] # activation function for the last layer, sigmoid is suggested due to zero-prediction\n\t\tchoice_hd_hn_tl_af = None\n\n\t\tchoice_apply_tl_af = [True] # True, False\n\n\t\tif choice_hd_hn_tl_af is not None:\n\t\t\tfor BN, RD, num_layers, af, apply_tl_af in product(choice_apply_BN, choice_apply_RD, choice_layers,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_hd_hn_tl_af, choice_apply_tl_af):\n\t\t\t\tffnns_para_dict = dict(FBN=False, BN=BN, RD=RD, num_layers=num_layers, HD_AF=af, HN_AF=af, TL_AF=af,\n\t\t\t\t\t\t\t\t\t apply_tl_af=apply_tl_af)\n\t\t\t\tsf_para_dict = dict()\n\t\t\t\tsf_para_dict['id'] = 'ffnns'\n\t\t\t\tsf_para_dict['ffnns'] = ffnns_para_dict\n\n\t\t\t\tself.sf_para_dict = sf_para_dict\n\t\t\t\tyield sf_para_dict\n\t\telse:\n\t\t\tfor BN, RD, num_layers, hd_hn_af, tl_af, apply_tl_af in product(choice_apply_BN, choice_apply_RD,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchoice_layers, choice_hd_hn_af,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchoice_tl_af, choice_apply_tl_af):\n\t\t\t\tffnns_para_dict = dict(FBN=False, BN=BN, RD=RD, num_layers=num_layers, HD_AF=hd_hn_af, HN_AF=hd_hn_af,\n\t\t\t\t\t\t\t\t\t TL_AF=tl_af, apply_tl_af=apply_tl_af)\n\t\t\t\tsf_para_dict = dict()\n\t\t\t\tsf_para_dict['id'] = 'ffnns'\n\t\t\t\tsf_para_dict['ffnns'] = ffnns_para_dict\n\n\t\t\t\tself.sf_para_dict = sf_para_dict\n\t\t\t\tyield sf_para_dict", "def NP_ATS(trajectory, min_gini):\r\n \r\n if len(trajectory) == 0:\r\n return -1\r\n \r\n \r\n \"\"\"\r\n Build partitions by avg velocity\r\n \"\"\"\r\n\r\n\r\n # epsilon_dict = [0.004592016 / 111, 0.01176842 / 111, 0.02649389 / 111, 0.05039507 / 111] #g=0.4\r\n epsilon_dict = [0.003461152 / 111, 0.02017883 / 111, 0.03125521 / 111, 0.08043219 / 111] #g=0.1, MAX Final Version\r\n # epsilon_dict = [0.00502254864192/111, 0.0255854290033/111, 0.040629531118/111, 0.112363957637/111] # g=0.3 max\r\n # epsilon_dict = [0.0129068727529/111,0.0336671793759/111,0.0587065427654/111,0.16344623299/111] # g=0.5 max\r\n # epsilon_dict = [0.0395070706597/111,0.0868617737273/111,0.220840329515/111,0.224195931453/111] # g=0.7 max\r\n # epsilon_dict = [0.0494846202161/111, 0.0942020880901/111, 0.252738388544/111, 0.347107672303/111] # g=0.9 max\r\n \r\n velocity_list = get_velocity(trajectory)\r\n \r\n epsilon_list = [epsilon_dict[label(v)] for v in velocity_list]\r\n \r\n # print [label(v) for v in velocity_list]\r\n # print epsilon_list\r\n \r\n S = EBT_Adaptive(trajectory, epsilon_list)\r\n\r\n return S", "def eg4(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg4_kernel(n, p, 
stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n noise_feature = np.random.randn(n, p_noise)\n stable_feature_dependent = np.zeros([n, p_stable])\n stable_feature_independent = np.random.randn(n, p_stable)\n for i in range(p_stable):\n stable_feature_dependent[:, i] = noise_feature[:, i % p_noise] + noise_feature[:,\n (i + 1) % p_noise] + 2 * np.random.randn(\n n) # still need noise\n stable_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n stable_depend_label = np.concatenate([stable_depend_label] * p_stable, axis=1)\n stable_feature = np.where(stable_depend_label < depend_ratio, stable_feature_dependent,\n stable_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n Y = np.matmul(stable_feature, b) + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg4'\n return data\n\n data_train = eg4_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg4_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):\r\n data = GPy.util.datasets.robot_wireless()\r\n\r\n # create simple GP Model\r\n m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)\r\n\r\n # optimize\r\n if optimize:\r\n m.optimize(messages=True, max_iters=max_iters)\r\n\r\n Xpredict = m.predict(data['Ytest'])[0]\r\n if plot:\r\n pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')\r\n pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')\r\n pb.axis('equal')\r\n pb.title('WiFi Localization with Gaussian Processes')\r\n pb.legend(('True Location', 'Predicted Location'))\r\n\r\n sse = ((data['Xtest'] - Xpredict)**2).sum()\r\n\r\n print m\r\n print('Sum of squares error on test data: ' + str(sse))\r\n return m", "def get_skims(G, transfer_penalty=300, delta=0.2):\n # shortest path\n sp = nx.shortest_path(G, weight='total_travel_time')\n # create a dictionary for stop travel impedance values\n # The travel impedance is also decomposed\n # GTC: total generalized travel cost\n # IVT: in-vehicle travel time\n # NONIVT: the remaining part related to transfer and waiting times\n ti = {}\n fields = ['GTC', \"IVT\", \"WT\", \"TRANSFER\", \"NONIVT\"]\n for key in sp.keys():\n ti[key] = {}\n for field in fields:\n ti[key][field] = dict()\n for source in sp.keys():\n for target in sp[source].keys():\n cur_sp = sp[source][target]\n for field in fields:\n ti[source][field][target] = 0\n if not len(cur_sp) == 1:\n # if not the node itself\n for k in range(len(cur_sp) - 1):\n i = cur_sp[k]\n j = cur_sp[k + 1]\n ti[source]['IVT'][target] += G[i][j]['ivt']\n ti[source]['WT'][target] += G[i][j]['wt']\n ti[source]['NONIVT'][target] += G[i][j]['wt']\n ti[source]['NONIVT'][target] += (len(cur_sp) - 2) * transfer_penalty\n ti[source]['TRANSFER'][target] = (len(cur_sp) - 2) * transfer_penalty\n ti[source]['GTC'][target] = ti[source]['IVT'][target] + ti[source]['NONIVT'][target]\n\n skims = DotMap()\n for field in fields:\n skims[field] = pd.DataFrame([ti[i][field] for i in sp.keys()], index=sp.keys())\n\n x_list = list(nx.get_node_attributes(G, 'x').values())\n y_list = 
list(nx.get_node_attributes(G, 'y').values())\n df = pd.DataFrame({'node_id': list(sp.keys()), 'x': x_list, 'y': y_list})\n df = df.set_index('node_id')\n\n skims['pos'] = df\n return skims", "def SGD(epochs, eta):\n # print([[[k] for k in i] for i in self.x])\n # print([[j] for j in self.y])\n # print([i] for)\n\n # if test_data: n_test = len(test_data)\n for j in xrange(epochs):\n # mini_batches = [training_data[k:k + mini_batch_size] for k in xrange(0, n, mini_batch_size)]\n # for mini_batch in mini_batches:\n # update_mini_batch(mini_batch, eta)\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n for x, y in training_data:\n delta_nabla_b, delta_nabla_w = backprop(x, y)\n nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n # for x, y in training_data:\n # delta_nabla_b, delta_nabla_w = backprop(x, y)\n # nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n # nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n self.weights = [w - (eta ) * nw\n for w, nw in zip(self.weights, nabla_w)]\n self.biases = [b - (eta ) * nb\n for b, nb in zip(self.biases, nabla_b)]\n\n # print 'Epoch {0}'.format(j)\n # print self.weights\n # print self.biases\n\n # if test_data:\n # print \"Epoch {0}: {1} / {2}\".format(\n # j, evaluate(test_data), n_test)\n # else:\n # print \"Epoch {0} complete\".format(j)", "def test_tdg_gate_nondeterministic_default_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.tdg_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def get_observation_driver_state(self):\n next_state = np.zeros(self.n_grids)\n grids = list(self.grids.values())\n for idx, grid in enumerate(grids):\n if grid is not None:\n next_state[idx] = grid.get_idle_driver_numbers_loop()\n return next_state", "def fitness_function(neural_net):\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness", "def grbefgs(self):\n print('Performing GrBeFGS\\n')\n\n frontier = PriorityFrontier()\n\n initial_heuristic = self.get_heuristic(self.initial_state)\n initial_node = SearchNode(self.initial_state)\n frontier.insert(initial_node, initial_heuristic)\n\n visited_nodes = set()\n \n while True:\n if frontier.is_empty():\n # Search failure\n return GenericResult(failure=True)\n \n # Get the next leaf node from the frontier\n leaf_node = frontier.pop()\n \n # Add this node to the visited nodes set\n visited_nodes.add(leaf_node)\n \n # Check for the goal state\n if self.check_goal_state(leaf_node.state):\n # Search success\n # Return final state and list of actions along path to the goal\n # as part of the GenericResult class solution member\n return GenericResult(solution=Solution(final_state=leaf_node.state, actions=self.get_action_path(leaf_node)))\n \n # Generate all possible actions for the given state\n actions = self.get_actions(leaf_node.state)\n \n # Create search nodes from the generated actions\n for action in actions:\n # Generate a new state from the given action\n new_state = self.get_result(leaf_node.state, action)\n \n # Get the new 
state's heuristic\n new_heuristic = self.get_heuristic(new_state)\n\n # Create a new search node with the created state\n new_node = SearchNode(new_state, leaf_node, action)\n \n # If this node has already been visited, ignore it\n if new_node in visited_nodes:\n continue\n\n # Check for any nodes with the same state as new_state and with better h values that \n # have yet to be visited in the frontier before adding new_node\n if new_node in frontier:\n frontier_node = frontier.peek_node(new_node)\n frontier_heuristic = self.get_heuristic(frontier_node.state)\n\n if frontier_heuristic <= new_heuristic:\n # The original heuristic was less than or equal to the new node\n # Disregard the new node\n continue\n \n else:\n # The new node's heuristic is larger\n # Remove the original node from the frontier\n frontier.remove_node(frontier_node)\n \n # Add the new node to the frontier\n frontier.insert(new_node, new_heuristic)", "def get_tuned_excitatory_weights(self):\n \n self.W_ee=np.zeros((self.N_e,self.N_e))\n \n if not hasattr(self,'fixed_connectivity_tuning'):\n self.fixed_connectivity_tuning=1\n \n num_tuned_conns=int(np.floor(self.fixed_connectivity_tuning*self.num_conns_ee))\n num_untuned_conns=self.num_conns_ee-num_tuned_conns\n \n for i in xrange(self.N_e):\n ref_phase=self.gp.phases[i,:]\n dists=gl.get_periodic_dist_on_rhombus(self.n_e,ref_phase,self.gp.phases,self.gp.u1,self.gp.u2)\n sorted_idxs=np.argsort(dists)\n \n tuned_idxs=sorted_idxs[:self.num_conns_ee]\n np.random.shuffle(tuned_idxs)\n\n #untuned_idxs=np.setdiff1d(np.arange(self.N_e),tuned_idxs)\n all_idxs=np.arange(self.N_e)\n np.random.shuffle(all_idxs)\n \n self.W_ee[i,tuned_idxs[0:num_tuned_conns]]=self.W_max_ee\n self.W_ee[i,all_idxs[:num_untuned_conns]]=self.W_max_ee\n \n \n self.W[:self.N_e,:self.N_e]=self.W_ee", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def compute_driver_instructions(self, t, zones):\n t_fifteen = np.min([t for i, j, t in self.sol_p.keys()])\n if t == 25200:\n # initialization step\n for veh in self.vehicles:\n if veh.driver_type == DriverType.AV:\n veh.rebalance(zones, veh.ozone)\n # this should be a memoized function\n for zone in zones:\n one_t_ahead_sol_p = {(i, j, t): self.sol_p[(i, j, t)] for i, j, t in self.sol_p.keys() if (t == t_fifteen\n and i == zone.id)}\n one_t_ahead_sol_r = {(i, j, t): self.sol_r[(i, j, t)] for i, j, t in self.sol_r.keys() if (t == t_fifteen\n and i == zone.id)}\n one_t_ahead_sol_d = {(i, j, t): self.sol_d[(i, j, t)] for i, j, t in self.sol_r.keys() if (t == t_fifteen\n and i == zone.id)}\n # what happens when it's empty???\n # print(f\"instruct drivers in zone {zone.id}\")\n zone.instruct_drivers(t, zones, one_t_ahead_sol_p, one_t_ahead_sol_r, one_t_ahead_sol_d)", "def select_best_chanels():\r\n \r\n \r\n all_paths = [['data_bci\\\\row_data\\\\subject1\\\\'], ['data_bci\\\\row_data\\\\subject2\\\\'],['data_bci\\\\row_data\\\\subject3\\\\']]\r\n\r\n train_subjects = ['01']\r\n test_subject = '02'\r\n freq = 512\r\n\r\n cutoff_beggining = 0\r\n columns_to_read = ['Fp1', 'AF3' ,'F7', 'F3', 'FC1', 'FC5', 'T7', 'C3', 'CP1', 'CP5',\r\n 'P7', 'P3', 'Pz', 'PO3', 'O1', 'Oz', 'O2', 'PO4', 'P4', 'P8', 'CP6',\r\n 'CP2', 'C4', 'T8', 'FC6', 'FC2', 'F4', 'F8', 'AF4', 'Fp2', 'Fz', 'Cz','class']\r\n seq_len = 0\r\n cut_step = 0\r\n num_perseg = freq\r\n num_overlap = int(num_perseg/2)\r\n 
min_freq=8\r\n max_freq=45\r\n \r\n chanels_rank = rank_chanels()\r\n \r\n result = []\r\n for i in range(1, len(chanels_rank)):\r\n intermidiate_result = []\r\n for path in all_paths:\r\n train_full_data, train_full_data_filtered, train_full_anots, test_full_data, test_full_filtered, test_full_annoations = read_filter(path, train_subjects,test_subject, columns_to_read, cutoff_beggining, seq_len, cut_step)\r\n\r\n train_psd_signals = eval_psd_not_modulated(train_full_data, num_perseg, num_overlap, freq, min_freq, max_freq)\r\n test_psd_signals = eval_psd_not_modulated(test_full_data, num_perseg, num_overlap, freq, min_freq, max_freq) \r\n\r\n train_psd_signals = flatten_data(train_psd_signals[:,:,chanels_rank[:i]])\r\n test_psd_signals = flatten_data(test_psd_signals[:,:,chanels_rank[:i]])\r\n \r\n acc = evalute_subset(train_psd_signals, test_psd_signals, train_full_anots, test_full_annoations)\r\n intermidiate_result.append(acc)\r\n \r\n result.append(intermidiate_result)\r\n #mean_subject_acc = np.array([sum(humans_acc)/len(humans_acc) for humans_acc in result])\r\n #best_idx = np.argmax(mean_subject_acc)\r\n\r\n return result, chanels_rank", "def sgd(self, training_data, epochs, batch_size, eta=0.01, lambda_r=0.2, verbose=False, test_data=None):\n stagnation_epochs = 5\n eta_divide_factor = 4\n eta_decrease_available = 4\n\n self.accuracies = np.zeros(epochs)\n self.costs = np.zeros(epochs)\n self.eta = eta\n self.lambda_r = lambda_r\n\n n = len(training_data)\n curr_stagnation = stagnation_epochs\n min_cost = float('inf')\n\n for idx, e in enumerate(range(epochs)):\n np.random.shuffle(training_data)\n batches = [np.array(training_data[k:k + batch_size], dtype=object)\n for k in range(0, len(training_data), batch_size)]\n\n for batch in batches:\n self.gradient_descent_step(batch, eta, lambda_r, n)\n\n self.costs[idx] = self.cost_func.prediction_cost(self.feed_forward([x_ for x_, y_ in training_data])[2],\n np.array([y_ for x_, y_ in training_data])) + \\\n self.weights_cost(n, self.lambda_r)\n\n if verbose:\n print('Epoch {} ended with cost {:.3f}'.format(e + 1, self.costs[idx]))\n\n min_cost = min(min_cost, self.costs[idx])\n\n if idx != 0:\n curr_stagnation = curr_stagnation - 1 if self.costs[idx] != min_cost else stagnation_epochs\n\n if curr_stagnation == 0:\n curr_stagnation = stagnation_epochs\n eta /= eta_divide_factor\n eta_decrease_available -= 1\n min_cost = float('inf')\n\n if eta_decrease_available < 0:\n if verbose:\n print('Eta decreased maximum available times, terminating')\n break\n\n if verbose:\n print('Decreasing eta to {:.5f}'.format(eta))\n\n if test_data:\n self.accuracies[idx] = self.metric.metric_value(self.predict([x_ for x_, y_ in test_data]),\n [y_ for x_, y_ in test_data])\n\n if verbose:\n self.plot_costs()\n\n if test_data:\n self.plot_metric_values()", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 
'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best", "def generative_model(X, Y, Xs_test, Ys_test):\n initial_sensor_loc = np.random.randn(7, 2) * 100\n estimated_sensor_loc = find_mle_by_grad_descent_part_e(\n initial_sensor_loc, Y, X, lr=0.001, num_iters=1000)\n\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n Y_pred = np.array(\n [get_object_location(estimated_sensor_loc, X_test_single) for X_test_single in X_test])\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test)**2, axis=1)))\n mses.append(mse)\n return mses", "def generative_model(X, Y, Xs_test, Ys_test):\n initial_sensor_loc = np.random.randn(7, 2) * 100\n estimated_sensor_loc = find_mle_by_grad_descent_part_e(\n initial_sensor_loc, Y, X, lr=0.001, num_iters=1000)\n\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n Y_pred = np.array(\n [get_object_location(estimated_sensor_loc, X_test_single) for X_test_single in X_test])\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test)**2, axis=1)))\n mses.append(mse)\n return mses", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def grid_search_epsilon(environmnet, policy='ε–greedy', parameter='epsilon'):\n\tparameter_values = []\n\tavg_scores = []\n\tavg_steps = []\n\n\tcount = 1\n\tdecay_search = [0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.99]\n\tfor param_num in decay_search:\n\n\t\tagent = Q_Agent(exploration_rate_decay=param_num, epsilon=1)\n\t\tall_iterations, all_rewards, step_count = agent.train(environmnet, print_results=True, iter_n=1000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t policy=policy)\n\t\tavg_scores.append(np.mean(all_rewards))\n\t\tavg_steps.append(np.mean(step_count))\n\t\tparameter_values.append(param_num)\n\t\trewards_data = 
np.array([all_iterations, all_rewards])\n\t\tstep_data = np.array([all_iterations, step_count])\n\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_rewards_' + str(\n\t\t\t\tparam_num) + '.csv', rewards_data.transpose(), delimiter=\",\")\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_steps_' + str(\n\t\t\t\tparam_num) + '.csv', step_data.transpose(), delimiter=\",\")\n\t\tif count % 50 == 0:\n\t\t\tprint('iteration {} of 10'.format(count))\n\n\t\tcount += 1\n\tresults = {\n\t\t'param_values': parameter_values,\n\t\t'avg_scores': avg_scores,\n\t\t'avg_steps': avg_steps,\n\n\t}\n\tprint(results)\n\treturn pd.DataFrame(results)", "def TNG_net(self): \n \n import h5py as h5\n filename = localpath+'input/yields/TNG/SNII.hdf5'\n # Read H5 file\n f = h5.File(filename, \"r\")\n \n # Define element indexing\t\t\t\n indexing = {}\n indexing['H'] = 'Hydrogen'\n indexing['He'] = 'Helium'\n indexing['C'] = 'Carbon'\n indexing['N']= 'Nitrogen'\n indexing['O'] = 'Oxygen'\n indexing['Ne'] = 'Neon'\n indexing['Mg'] = 'Magnesium'\n indexing['Si'] = 'Silicon'\n indexing['S'] = 'Sulphur' # Not used by TNG simulation\n indexing['Ca'] = 'Calcium' # Not used by TNG simulation\n indexing['Fe'] = 'Iron'\n \n self.elements = list(indexing.keys())\n \n self.table = {}\n \n # Define masses / metallicities\n self.metallicities = list(f['Metallicities'].value)\n self.masses = f['Masses'].value\n\n \n for z_index,z in enumerate(self.metallicities):\n \n yield_subtable = {}\n \n z_name = f['Yield_names'].value[z_index].decode('utf-8')\n z_data = f['Yields/'+z_name+'/Yield']\n \n ejecta_mass = f['Yields/'+z_name+'/Ejected_mass'].value\n \n yield_subtable['Mass'] = self.masses\n remnants = self.masses-ejecta_mass\n yield_subtable['mass_in_remnants'] = np.divide(remnants,self.masses)\n for el in list(indexing.keys()):\n yield_subtable[el] = np.zeros(len(self.masses))\n \n summed_yields = np.zeros(len(self.masses))\n \n for m_index,mass in enumerate(self.masses):\n for el_index,el in enumerate(self.elements):\n el_yield_fraction = z_data[el_index][m_index]/mass #(mass-remnants[m_index]) # Find fraction of ejecta per element\n yield_subtable[el][m_index] = el_yield_fraction\t\t\t\t\t\n summed_yields[m_index]+=el_yield_fraction # Compute total yield\n \n yield_subtable['unprocessed_mass_in_winds'] = 1.-summed_yields-yield_subtable['mass_in_remnants']\n \n # Restructure table\n all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements\n \n list_of_arrays = [yield_subtable[key] for key in all_keys]\n restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)\n \n self.table[z] = restructure_subtable", "def driver_statistics(self):\n df_feature_ = self.df_feature[~self.df_feature[0].values][['LABEL', 1, 2, 6, 7, 11, 12, 16, 17, 21, 22, 26, 27]]\n df_feature_.columns = ['LABEL', 'Speed_Mean', 'Speed_Var', \"Ac_Mean\", \"Ac_Var\", \"Dc_Mean\", \"Dc_Var\", \n 'Steer_Speed_Mean', 'Steer_Speed_Var', \"Steer_Ac_Mean\", \"Steer_Ac_Var\", \"Steer_Dc_Mean\", \"Steer_Dc_var\"]\n df_feature_['DISTANCE'] = self.df[~self.df_feature[0].values]['DISTANCE']\n return df_feature_.groupby('LABEL').mean()", "def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n 
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n first_order_edge = [\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n Utils.close_files()", "def TNG_net(self):\n import h5py as h5\n filename = localpath+'input/yields/TNG/AGB.hdf5'\n # Read H5 file\n f = h5.File(filename, \"r\")\n\n indexing = {}\n indexing['H'] = 'Hydrogen'\n indexing['He'] = 'Helium'\n indexing['C'] = 'Carbon'\n indexing['N']= 
'Nitrogen'\n indexing['O'] = 'Oxygen'\n indexing['Ne'] = 'Neon'\n indexing['Mg'] = 'Magnesium'\n indexing['Si'] = 'Silicon'\n indexing['S'] = 'Sulphur' # Not used by TNG simulation\n indexing['Ca'] = 'Calcium' # Not used by TNG simulation\n indexing['Fe'] = 'Iron'\n\n self.elements = list(indexing.keys())\n \n self.table = {}\n \n self.metallicities = list(f['Metallicities'].value)\n self.masses = f['Masses'].value\n \n\n for z_index,z in enumerate(self.metallicities):\n\n yield_subtable = {}\n \n z_name = f['Yield_names'].value[z_index].decode('utf-8')\n z_data = f['Yields/'+z_name+'/Yield']\n \n ejecta_mass = f['Yields/'+z_name+'/Ejected_mass'].value\n \n yield_subtable['Mass'] = list(reversed(self.masses))\n remnants = self.masses-ejecta_mass\n yield_subtable['mass_in_remnants'] = np.divide(list(reversed(remnants)),yield_subtable['Mass'])\n for el in list(indexing.keys()):\n yield_subtable[el] = np.zeros(len(self.masses))\n \n summed_yields = np.zeros(len(self.masses))\n \n for m_index,mass in enumerate(yield_subtable['Mass']):\n for el_index,el in enumerate(self.elements):\n el_yield = z_data[el_index][len(self.masses)-m_index-1]\n el_yield_fraction = el_yield/mass\n yield_subtable[el][m_index] = el_yield_fraction\n summed_yields[m_index]+=el_yield_fraction\n \n yield_subtable['unprocessed_mass_in_winds'] = 1.-summed_yields-yield_subtable['mass_in_remnants']\n \n self.table[z.astype(float)] = yield_subtable\n \n # Restructure table\n all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements\n \n list_of_arrays = [yield_subtable[key] for key in all_keys]\n restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)\n \n self.table[z] = restructure_subtable", "def GNIs(features, labels, mode, params, config):\n del config\n N, H = params[\"N\"], params[\"H\"]\n n_samples = params[\"n_samples\"]\n\n params[\"non_targeted_layers\"] = []\n\n if params[\"input_inject\"]:\n params[\"non_targeted_layers\"] = list(range(1, N + 1))\n\n params[\"non_targeted_layers\"] += [N + 1]\n\n image_tile_summary(\"input\", features, rows=1, cols=16)\n\n # --- Ensure input data is flat\n features = tf.reshape(features, (-1, np.prod(params['image_shape'])))\n features = tf.cast(features, dtype=tf.float32)\n if labels is not None:\n labels = tf.cast(labels, dtype=tf.float32)\n else:\n labels = tf.ones_like(features[:, :10], dtype=None)\n B = int_shape(labels)[0]\n n_output = int_shape(labels)[-1]\n\n if params['activation'] != 'linear':\n activation = getattr(tf.nn, params['activation'])\n else:\n activation = None\n\n # --- Make discriminator\n if params[\"disc_type\"] == 'mlp':\n mlp = make_mlp(activation, np.prod(params['image_shape']), N, H,\n n_output)\n if params[\"disc_type\"] == 'convnet':\n mlp = make_convnet(activation, params['image_shape'], n_output)\n if params[\"disc_type\"] == 'vgg':\n mlp = make_vgg13(activation, params['image_shape'], n_output)\n\n # --- Retrieve intermediate activations, and layer output\n # --- we don't want to mask the final layer so activations doesn't include the output layer\n p_phi_y = mlp(features)\n\n sel_layer_shapes = [p_phi_y['layer_shapes'][i] for i in range(N + 1)]\n\n # --- Get Predictions using log(p(y|x))\n preds = p_phi_y['activations'][-1]\n\n # --- Classification loss, log(p(y|x))\n if params[\"loss\"] == 'cross_entropy':\n loss = cross_entropy(labels, preds)\n pred_class = tf.argmax(input=preds, axis=-1)\n true_class = tf.argmax(input=labels, axis=-1)\n acc = tf.cast(tf.equal(pred_class, true_class), 
tf.float32)\n tf.compat.v1.summary.scalar(\"accuracy\", tf.reduce_mean(acc))\n elif params[\"loss\"] == 'mse':\n loss = square_error(labels, preds)\n\n global_step = tf.compat.v1.train.get_or_create_global_step()\n\n p_phi_y_noisy = replace_mask_layer(\n features,\n p_phi_y,\n non_targeted_layers=params['non_targeted_layers'],\n var=params[\"var\"],\n n_samples=n_samples,\n mode=params[\"noise_mode\"])\n\n preds_noisy = p_phi_y_noisy['activations'][-1]\n\n # --- Classification loss, log(p(y|x))\n if params[\"loss\"] == 'cross_entropy':\n noisy_loss = cross_entropy(labels, preds_noisy)\n elif params[\"loss\"] == 'mse':\n noisy_loss = square_error(labels, preds_noisy)\n\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(\n params[\"learning_rate\"])\n\n gradients, variables = [], []\n\n tf.compat.v1.summary.scalar(\"learning_rate\", params[\"learning_rate\"])\n tf.compat.v1.summary.scalar(\"batch_size\", B)\n\n # --- Enumerate over activation layers, zip automatically removes final\n # --- logit layer\n\n layers = [\n l for l in p_phi_y['net'].layers\n if ('dense' in l.name or 'conv' in l.name)\n ]\n\n noises = [\n tf.reshape(n, (B, n_samples, -1)) for n in p_phi_y_noisy['noise'][:-1]\n ]\n\n weights = [layers[i].trainable_weights[0] for i in range(N + 1)]\n acts = p_phi_y['activations'][:-1]\n\n Js = [\n tf.reshape(batch_jacobian(preds, a, use_pfor=True), (B, -1, n_output))\n for a in acts\n ]\n print(Js)\n\n G, C, H = calc_taylor_expansion(Js, loss, preds, noises, B, n_samples)\n\n EC = calc_tikhonov_reg(Js, acts, preds, params[\"noise_mode\"],\n params[\"var\"], params[\"loss\"])\n\n H_sig = heavy_tail_variance(Js, loss, preds)\n\n l_noise = 0\n if params[\"noise_type\"] is None:\n noisy_loss_estimate = loss\n elif params[\"noise_type\"] == 'input':\n noisy_loss_estimate = noisy_loss\n elif 'full' in params[\"noise_type\"]:\n # --- This is the Gaussian stuff\n assert n_samples == 1\n l_noise += H + G + C\n noisy_loss_estimate = loss + l_noise\n\n elif 'marginal' in params[\"noise_type\"]:\n # --- Don't ever noise final layer\n assert n_samples == 1\n l_noise = EC\n if 'H' in params[\"noise_type\"]:\n l_noise += H\n\n if 'C' in params[\"noise_type\"]:\n # alpha, beta, sigma, mu = tf.py_func(\n # estimate_all_params,\n # inp=[(C - EC)],\n # Tout=[tf.float32, tf.float32, tf.float32, tf.float32])\n #\n # tf.compat.v1.summary.scalar('C/alpha', alpha)\n # tf.compat.v1.summary.scalar('C/beta', beta)\n # tf.compat.v1.summary.scalar('C/sigma', sigma)\n # tf.compat.v1.summary.scalar('C/mu', mu)\n # tf.compat.v1.summary.scalar('C', tf.reduce_mean(C - EC))\n # tf.compat.v1.summary.histogram('C', C)\n l_noise += (C - EC)\n if 'G' in params[\"noise_type\"]:\n l_noise += G\n noisy_loss_estimate = loss + l_noise\n\n actual_noise = tf.reduce_mean(noisy_loss - loss)\n estimated_noise = tf.reduce_mean(noisy_loss_estimate - loss)\n\n tf.compat.v1.summary.scalar('loss/actual_noise', actual_noise)\n tf.compat.v1.summary.scalar('loss/estimated_noise', estimated_noise)\n\n tf.compat.v1.summary.scalar(\"loss/noisy_\" + params[\"loss\"],\n tf.reduce_mean(noisy_loss))\n tf.compat.v1.summary.scalar(\"loss/og_\" + params[\"loss\"],\n tf.reduce_mean(loss))\n\n noise_err = tf.reduce_mean(estimated_noise - actual_noise)\n\n tf.compat.v1.summary.scalar(\n 'loss/noise_est_pe',\n tf.abs(noise_err / tf.reduce_mean(actual_noise + 1e-8)))\n\n tf.compat.v1.summary.scalar('loss/noise_est_mse',\n tf.abs(tf.reduce_mean(noise_err**2)))\n\n loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss)\n\n 
tf.compat.v1.summary.scalar(\n 'loss/loss_est_pe',\n tf.abs(loss_err / tf.reduce_mean(noisy_loss + 1e-8)))\n\n tf.compat.v1.summary.scalar('loss/loss_est_mse',\n tf.abs(tf.reduce_mean(loss_err**2)))\n\n if params[\"L2\"] > 0:\n vars = tf.trainable_variables()\n l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in vars]) * params[\"L2\"]\n noisy_loss_estimate += l2_reg\n tf.compat.v1.summary.scalar(\"loss/L2_reg\", l2_reg)\n loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss)\n\n # tf.compat.v1.summary.image('activations_covariance', activation_covariance)\n # g_noise =\n for i, w in enumerate(weights):\n layer_name = \"layer_\" + str(i)\n num_params = np.prod(int_shape(w))\n\n a = p_phi_y['activations'][i]\n noisy_a = p_phi_y_noisy['activations'][i]\n inj_noise = noisy_a - a\n print(noisy_a, a)\n\n # --- Display in tensorboard -- Injected noise stats\n tf.compat.v1.summary.histogram(layer_name + '/injected_noise',\n inj_noise)\n\n n_neurons = int_shape(a)[1]\n\n tf.compat.v1.summary.histogram(layer_name + '/w', w)\n corr = tfp.stats.correlation(a)\n tf.compat.v1.summary.scalar(layer_name + '/corr', tf.reduce_mean(corr))\n\n sparsity = tf.reduce_sum(tf.cast(a <= 1e-6, tf.float32))\n\n # tf.compat.v1.summary.scalar(layer_name + '/lifetime_sparsity',\n # sparsity / B)\n tf.compat.v1.summary.scalar(layer_name + '/population_sparsity',\n sparsity / (B * n_neurons))\n\n # --- Retrieve the noise of the gradient of each layer\n # --- = noisy gradients - gradients, this corresponds to\n # --- n_t * gradients where n_t is our noise matrix\n # --- W gradients\n\n og_W_n = tf.gradients([tf.reduce_mean(noisy_loss)], [w])[0]\n\n g_W_n = tf.gradients([tf.reduce_mean(noisy_loss_estimate)], [w])[0]\n g = tf.gradients(tf.reduce_mean(loss), w)[0]\n\n err = -g_W_n + og_W_n\n g_noise = g_W_n - g\n\n tf.compat.v1.summary.scalar(layer_name + '/mean_grad_noise',\n tf.reduce_mean(g_noise))\n tf.compat.v1.summary.histogram(layer_name + '/grad_noise', g_noise)\n\n tf.compat.v1.summary.scalar(layer_name + '/weights_l2/',\n tf.reduce_mean(tf.norm(w)))\n\n tf.compat.v1.summary.scalar(layer_name + '/grad_est_mse',\n tf.reduce_mean((og_W_n - g_W_n)**2))\n tf.compat.v1.summary.scalar(layer_name + '/grad_est_pe',\n tf.reduce_mean((-og_W_n + g_W_n) / og_W_n))\n\n gradients.extend([g_W_n])\n variables.extend([w])\n\n if i > 0 and params['calc_hessian']:\n # --- Number of parameters does not include batch_size\n\n hessians = trace_hessian([noisy_loss], weights)\n h_trace = tf.reduce_sum(tf.concat(hessians, axis=1)) / (B * n_samples)\n\n for i, h in enumerate(hessians):\n layer_name = \"layer_\" + str(i)\n tf.compat.v1.summary.scalar(layer_name + '/H_trace',\n tf.reduce_sum(h) / (B * n_samples))\n\n tf.compat.v1.summary.scalar('network/H_trace', h_trace)\n\n # --- Sum all them losses\n\n loss = tf.reduce_mean(loss)\n noisy_loss = tf.reduce_mean(noisy_loss)\n\n train_step = optimizer.apply_gradients(zip(gradients, variables),\n global_step=global_step)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n eval_metrics = {}\n predictions = {\n 'preds': tf.nn.softmax(p_phi_y['activations'][-1], axis=1)\n }\n predictions['GCH'] = G + C + H - EC\n\n for i, J in enumerate(Js):\n predictions['J' + str(i)] = J\n\n # for i, w in enumerate(weights):\n # predictions['dGCH' + str(i)] = tf.gradients(\n # [predictions['GCH']], [w])[0]\n if params['calc_hessian']:\n # --- Number of parameters does not include batch_size\n\n hessians = trace_hessian([noisy_loss], weights[1:3])\n h_trace = tf.reduce_sum(tf.concat(hessians,\n axis=1)) / (B * 
n_samples)\n\n predictions['h_trace'] = h_trace\n\n else:\n predictions = {}\n eval_metrics = {\n \"loss/og\": tf.compat.v1.metrics.mean(loss),\n }\n if params[\"loss\"] == 'cross_entropy':\n eval_metrics[\"accuracy\"] = tf.compat.v1.metrics.mean(acc)\n\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n predictions=predictions,\n train_op=train_step,\n eval_metric_ops=eval_metrics)", "def estimate_tree(self):\n logger.info('TreeCatTrainer.estimate_tree given %d rows',\n len(self._added_rows))\n complete_grid = self._tree.complete_grid\n edge_logits = self.compute_edge_logits()\n edges = estimate_tree(complete_grid, edge_logits)\n return edges, edge_logits", "def test_tdg_gate_nondeterministic_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.tdg_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # 
# activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def jupiteNetwork(): \n \n # Build a graph for large clos topo\n tor_cut, aggr_cut, spine_cut = 512, 256, 256\n #2048, 4096, 4096 #32*4*2, 64*2, 16*2\n switches, edges = tp.jupiter_topo(\n tor_cut=tor_cut, aggr_cut=aggr_cut, spine_cut=spine_cut\n )\n \n G = build_graph(edges)\n 
external_edges = []\n for node in G.nodes():\n if 'sh' in node:\n G.add_edge(node, 'e1')\n \n \"\"\"\n paths = list(nx.all_shortest_paths(G, 'tor385', 'tor1'))\n #print(paths)\n #eee\n paths = list(nx.all_shortest_paths(G, 'tor129', 'tor257'))\n #print(paths)\n paths = list(nx.all_shortest_paths(G, 'tor257', 'tor385'))\n #print(paths)\n #eee\n \"\"\"\n switch_nodes, hnodes, tors, anodes, snodes = tp.getJupiternNodes(\n tors_num=tor_cut, aggr_num=aggr_cut, spine_num=spine_cut\n )\n print('**** is_connected(G)', nx.is_connected(G))\n print('**** number of components', nx.number_connected_components(G))\n \n tors = tors[0:512] + ['e1']\n\n # Get the routing path of all nodes\n table_file_name = '../outputs/jupiter_routing_table_anodes_cut4.txt'\n\n if((os.path.isfile(table_file_name)) == False):\n table = all_routing(G, tors, table_file_name)\n else:\n json_data = open(table_file_name).read()\n table = json.loads(json_data)\n \n seeds, polys = cf.get_seeds_table_jupiter(switch_nodes + ['e1']) #\n \n return G, tors, edges, table, seeds, polys, anodes", "def get_network(data, parameters, num_layers, layers=None, save=None):\n d_act = get_d_act(parameters)\n\n if layers is None:\n layers = [[\"relu\", \"dropout\"], [\"relu\", \"regularizer\"], [\"relu\"], [\"sigmoid\"]]\n # If some layers haven't None or regularizer\n layers = [layer if len(layer) > 1 else layer + [None] for layer in layers]\n\n best_networks = []\n for layer in layers:\n acts = get_act_by_layer(layer, d_act)\n if not isinstance(acts, list):\n acts = [acts]\n for act in acts:\n param_grid = {'act': [act]}\n grid_search = GridSearchCV(ModelKeeper(), param_grid)\n grid_search.fit(data.train, data.train_labels, epochs=200)\n flatten = itertools.chain.from_iterable\n grid_search.score(list(ft.reduce(operator.iconcat, data.test, [])), list(ft.reduce(operator.iconcat, data.test_labels, [])))\n\n all_results = grid_search.cv_results_\n\n acts = [([act[\"act\"]], layer) for act in all_results[\"params\"]]\n results = all_results[\"mean_test_score\"]\n\n best_networks += sorted([(results[i], acts[i]) for i in range(len(results))])\n\n best_networks_by_iterations = []\n best_networks_by_iterations += best_networks\n submission = pd.DataFrame({'best_networks_by_iterations': best_networks_by_iterations})\n submission.to_csv('best_networks_by_iterations.csv', index=True)\n # Look over best_networks num_layers - 1 times\n for _ in range(num_layers - 1):\n k = len(best_networks)\n for i in range(k):\n network = best_networks[i]\n act_second = get_act_by_layer(network[1][1], d_act)\n\n param_grid = {'act': act_second}\n grid_search = GridSearchCV(ModelKeeper(network[1][0]), param_grid)\n grid_search.fit(data.train, data.train_labels, epochs=200)\n grid_search.score(list(ft.reduce(operator.iconcat, data.test, [])), list(ft.reduce(operator.iconcat, data.test_labels, [])))\n\n all_results = grid_search.cv_results_\n\n acts = [(network[1][0] + [act[\"act\"]], network[1][1]) for act in all_results[\"params\"]]\n results = all_results[\"mean_test_score\"]\n\n best_networks += sorted([(results[i], acts[i]) for i in range(len(results))], reverse=True)[:2]\n\n best_networks = sorted(best_networks, reverse=True)[:len(best_networks) // 3 + 1]\n best_networks_by_iterations += best_networks\n\n if save:\n # Save result by iteration\n best_networks_by_iterations += best_networks\n submission = pd.DataFrame({'best_networks_by_iterations': best_networks_by_iterations})\n submission.to_csv('best_networks_by_iterations.csv', index=True)\n\n return 
best_networks[:3], best_networks_by_iterations", "def tryEverything(g, verbose, graphname):\r\n prio = ['rku', 'random', 'BIL', 'rkd', 'cluHPS', 'rkusd', 'rkuad']\r\n placement = ['eft', 'BIM*', 'OLB', 'MET', 'DL', 'GDL']\r\n costFunction = ['mean', 'median', 'maxmax', 'minmax', 'minmin', 'maxmin']\r\n desc = ['DLS/DC', None, 'DCP']\r\n useOfBIM = [False, True]\r\n insertion = [False, True]\r\n BSA = [False, True]\r\n res: Dict[str, List[float]] = {}\r\n cnt = 0\r\n\r\n for ip, p in enumerate(prio):\r\n for ipl, pl in enumerate(placement):\r\n for ic, c in enumerate(costFunction):\r\n if p != 'BIL' or c == 'mean' or pl in ['DL', 'GDL']:\r\n for idd, d in enumerate(desc):\r\n for iu, u in enumerate(useOfBIM):\r\n for ii, i in enumerate(insertion):\r\n for ib, b in enumerate(BSA):\r\n cnt += 1\r\n name = \";\".join(map(str, [ip, ic, ipl, idd, iu, ii, ib]))\r\n\r\n # dispName = \"-\".join(map(str, [p, pl, c, d, u, i, b]))\r\n # print(\"Heuristic n°\", cnt, \"-\", dispName)\r\n # print(\"Heuristic n°\", cnt, \"-\", name)\r\n\r\n startScheduling = timeit.default_timer()\r\n try:\r\n schedule = computeSchedule(g, strategyPrio=p, costFunction=c,\r\n strategyPlacement=pl,\r\n useOfBIM=u, desc=d,\r\n insertion=i, bsa=b, verbose=verbose)\r\n verifPrec(g, schedule, verbose)\r\n endScheduling = timeit.default_timer()\r\n # print(\"Ended in :\", 1000*(endScheduling - startScheduling), \"ms\")\r\n # print(\"Ended in :\", round(1000 * (endScheduling - startScheduling),2), \"ms\")\r\n timeS = round(1000 * (endScheduling - startScheduling), 2)\r\n # print(f\"timeS : {timeS}\")\r\n if verbose:\r\n print(f\"Time : {timeS}ms\")\r\n res[name] = [round(schedule[getExitTask(g)][2], 6), timeS]\r\n except Exception as _:\r\n\r\n print(\"Error for : \" + name + \" on file \" + graphname)\r\n file = open(\"error.log\", 'a')\r\n file.write(f\"Error for {name} on file {graphname}\\n\")\r\n file.close()\r\n raise _\r\n return res\r\n return res", "def straigh_subsample(neuorn, distance):\n\n # Selecting the main points: branching nodes and end nodes\n selected_index = get_main_points()\n\n # for each segment between two consecuative main points, a few nodes from the segment will be added to the selected node.\n # These new nodes will be selected base on the fact that neural distance of two consecuative nodes is around 'distance'.\n # Specifically, it starts from the far main point, and goes on the segment toward the near main point. Then the first node which is\n # going to add has the property that it is the farest node from begining on the segment such that its distance from begining is\n # less than 'distance'. 
The next nodes will be selected similarly.\n\n for i in selected_index:\n upList = np.array([i], dtype = int)\n index = neuorn.parent_index[i]\n dist = neuorn.distance_from_parent[i]\n while(~np.any(selected_index == index)):\n upList = np.append(upList,index)\n index = neuorn.parent_index[index]\n dist = np.append(dist, sum(neuorn.distance_from_parent[upList]))\n dist = np.append(0, dist)\n (I,) = np.where(np.diff(np.floor(dist/distance))>0)\n I = upList[I]\n selected_index = np.append(selected_index, I)\n selected_index = np.unique(selected_index)\n neuron = neuron_with_selected_nodes(selected_index)\n return neuron", "def test_tdg_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.tdg_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def grid_search(verbose):\n\n # Load Ising data.\n Ising_Data = prepare_Ising_DNN()\n\n # Perform grid search over learning rate and number of hidden neurons.\n N_neurons=np.logspace(0,3,4).astype(\"int\") # Check number of neurons over multiple decades.\n learning_rates=np.logspace(-6,-1,6)\n\n # Pre-allocate variables to store accuracy and loss data.\n train_loss=np.zeros((len(N_neurons),len(learning_rates)),dtype=np.float64)\n train_accuracy=np.zeros_like(train_loss)\n test_loss=np.zeros_like(train_loss)\n test_accuracy=np.zeros_like(train_loss)\n critical_loss=np.zeros_like(train_loss)\n critical_accuracy=np.zeros_like(train_loss)\n\n # Grid search.\n for i, neurons in enumerate(N_neurons):\n for j, lr in enumerate(learning_rates):\n\n print(\"training DNN with %4d neurons and SGD lr=%0.6f.\" %(neurons,lr) )\n\n train_loss[i,j],train_accuracy[i,j],\\\n test_loss[i,j],test_accuracy[i,j],\\\n critical_loss[i,j],critical_accuracy[i,j] = evaluate_model(neurons,lr,Ising_Data,verbose)\n\n plot_data(learning_rates,N_neurons,train_accuracy, \"training\")\n plot_data(learning_rates,N_neurons,test_accuracy, \"testing\")\n plot_data(learning_rates,N_neurons,critical_accuracy, \"critical\")", "def ensemble_models(input_data: str, test_file=None,models=None,\n models_file=None,\n genome_handler_file=None,\n top_n=10,\n trained=True,\n ensemble_method=\"average\",\n batch_size=64, nb_epoch=100, early_stop=None, mod=None,\n max_x_length=50, min_rt=0, max_rt=120, unit=\"s\", out_dir=\"./\", prefix=\"test\"):\n from AutoSeq import GenomeHandler\n\n # print(\"The number of models:\", len(models))\n\n # test data\n X_test = np.empty(1)\n Y_test = np.empty(1)\n\n y_pr = []\n score = []\n\n model_list = dict()\n\n\n if genome_handler_file is not None:\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=max_x_length,\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir)\n model_list['dp_model'] = dict()\n model_list['max_x_length'] = X_train.shape[1]\n model_list['aa'] = out_dir + \"/aa.tsv\"\n print(\"max_x_length: %s\" % (max_x_length))\n # read models from genetic search result configure file\n optimizer_name = dict()\n if models_file is not None:\n models = dict()\n gn = pd.read_csv(models_file)\n select_models = gn.sort_values('Val Accuracy', ascending=True).head(top_n)\n genome_handler = pickle.load(open(genome_handler_file, \"rb\"))\n genome_handler.input_shape = 
X_train.shape[1:]\n select_models = np.array(select_models.iloc[:, 0:(select_models.shape[1] - 2)])\n for i in range(0, select_models.shape[0]):\n #models[i], optimizer_name = genome_handler.decodeOneHot(select_models[i],return_optimizer=True)\n models[i], optimizer_name[i] = genome_handler.decodeOneHotPlusLSTM(select_models[i], return_optimizer=True)\n\n trained = False\n else:\n print(\"\")\n\n if not trained:\n print(\"Training ...\")\n # For each model, train the model\n for (name, model) in models.items():\n print(\"Train model:\", name)\n # perform sample specific training\n res_map = train_model(input_data=input_data, test_file=test_file, batch_size=batch_size,\n nb_epoch=nb_epoch, early_stop=early_stop, mod=mod,\n max_x_length=max_x_length, min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir, prefix=str(name), model=model,\n optimizer_name=optimizer_name[name])\n\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n model_file_path = out_dir + \"/\" + model_file_name\n res_map[\"model\"].save(model_file_path)\n\n model_list['dp_model'][name] = model_file_path\n\n del res_map\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n else:\n print(\"The models have been trained!\")\n\n\n else:\n\n ## Transfer learning\n with open(models_file, \"r\") as read_file:\n model_list = json.load(read_file)\n\n model_folder = os.path.dirname(models_file)\n aa_file = os.path.basename(model_list['aa'])\n aa_file = model_folder + \"/\" + aa_file\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=model_list['max_x_length'],\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir,aa_file=aa_file)\n\n\n new_model_list = dict()\n new_model_list['dp_model'] = dict()\n for (name, dp_model_file) in model_list['dp_model'].items():\n print(\"\\nDeep learning model:\", name)\n # keras model evaluation: loss and accuracy\n # load model\n model_name = os.path.basename(dp_model_file)\n model_full_path = model_folder + \"/\" + model_name\n\n model = load_model(model_full_path)\n #new_model = change_model(model, X_train.shape[1:])\n new_model = model\n\n print(\"Perform transfer learning ...\")\n n_layers = len(new_model.layers)\n print(\"The number of layers: %d\" % (n_layers))\n #for layer in new_model.layers:\n # layer_name = str(layer.name)\n # if layer_name.startswith(\"dense\"):\n # break\n # else:\n # layer.trainable = False\n # print(\"layer (frozen:True): %s\" % (layer_name))\n\n new_model.compile(loss='mean_squared_error',\n ## In this case, we cannot change the learning rate.\n optimizer=model.optimizer,\n #optimizer=Adam(lr=0.0001),\n #optimizer=SGD(lr=1e-3, decay=1e-4, momentum=0.9, nesterov=True),\n metrics=['mse', 'mae'])\n my_callbacks = RegCallback(X_train, X_test, Y_train, Y_test, min_rt=min_rt, max_rt=max_rt)\n # Save model\n model_chk_path = out_dir + \"/best_model.hdf5\"\n mcp = ModelCheckpoint(model_chk_path, monitor=\"val_mean_squared_error\", save_best_only=True,\n save_weights_only=False,\n verbose=1, mode='min')\n\n ## monitor training information\n # tbCallBack = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)\n new_model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, validation_data=(X_test, Y_test),\n callbacks=[my_callbacks, mcp])\n\n ## get the best model\n best_model = load_model(model_chk_path)\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n 
model_file_path = out_dir + \"/\" + model_file_name\n best_model.save(model_file_path)\n\n new_model_list['dp_model'][name] = model_file_path\n\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n\n new_model_list['max_x_length'] = model_list['max_x_length']\n new_aa_file = out_dir + \"/\" + os.path.basename(model_list['aa'])\n copyfile(aa_file, new_aa_file)\n new_model_list['aa'] = new_aa_file\n\n ## Useful for new data prediction\n new_model_list['min_rt'] = min_rt\n new_model_list['max_rt'] = max_rt\n\n model_list = new_model_list\n\n\n # save model data\n #file_all_models = open(out_dir + \"/all_models.obj\", 'wb')\n #pickle.dump(models, file_all_models)\n #file_all_models.close()\n\n ####################################################################################################################\n print(\"Ensemble learning ...\")\n\n\n para = dict()\n para['min_rt'] = min_rt\n para['max_rt'] = max_rt\n\n ## save result\n model_json = out_dir + \"/model.json\"\n with open(model_json, 'w') as f:\n json.dump(model_list, f)\n\n ## evaluation\n if test_file is not None:\n ensemble_predict(model_json,x=X_test,y=Y_test,para=para, batch_size=batch_size,method=ensemble_method,\n out_dir=out_dir,\n prefix=\"final_eval\")\n\n ####################################################################################################################", "def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod", "def multiplication_test():\r\n\r\n def fitness_function(neural_net):\r\n \"\"\"Calculate the fitness of a neural_net.\"\"\"\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness\r\n\r\n gen_size = 50\r\n net_size = (2, 1)\r\n genetic_algorithm = GeneticAlgorithm(gen_size, net_size, mutation_rate=0.3, mutation_chance=0.5)\r\n\r\n highest_so_far = 0\r\n while True:\r\n # Testing creatures\r\n for neural_net in genetic_algorithm.population:\r\n neural_net.fitness = fitness_function(neural_net)\r\n\r\n # Sorting creatures\r\n genetic_algorithm.calculate_stats()\r\n\r\n print(\"Gen\", genetic_algorithm.current_generation, \":\")\r\n print(\"Max fitness\", genetic_algorithm.stats.max_fitness)\r\n print(\"Mean fitness\", genetic_algorithm.stats.mean_fitness)\r\n highest_so_far = max(genetic_algorithm.stats.max_fitness, highest_so_far)\r\n print(\"Highest so far\", highest_so_far)\r\n\r\n\r\n # Starting next generation\r\n if genetic_algorithm.stats.max_fitness < 24.9 and genetic_algorithm.current_generation < 1000:\r\n genetic_algorithm.next_generation()\r\n else:\r\n break\r\n\r\n\r\n quit()\r\n\r\n\r\n for net in genetic_algorithm.sorted_population:\r\n print(net.fitness)\r\n best_neural_net = genetic_algorithm.sorted_population[0]\r\n print(\"Weights:\")\r\n print(best_neural_net.layers[0].weights[0])\r\n while 
True:\r\n print()\r\n in_a = input(\"Give net first number: \")\r\n in_b = input(\"Give net second number: \")\r\n answer = best_neural_net.calculate([np.log(float(in_a)), np.log(float(in_b))])[0]\r\n print(\"Net's answer:\", np.exp(answer))", "def eval_genomes(genomes, config_):\n data = next_batch()\n assert data is not None\n inputs, outputs = data\n inputs = preprocessor(inputs)\n for _, genome in tqdm(genomes):\n net = RecurrentNet.create(genome, config_)\n mse = 0\n for single_inputs, output in zip(inputs, outputs):\n net.reset()\n mask, score = gate_activation(net, single_inputs)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n mse += (xo - output.item())**2\n genome.fitness = 1 / (1 + mse)", "def test_using_ego_graph(self):\n assert_equal(nx.local_efficiency(self.G3), 7 / 12)", "def preprocessing_steiner(self, extension):\n G = nx.Graph(self.optimization_graph)\n terminal_nodes = [node for node, data in G.nodes(data=True)\n if data.get(config.NODE_TYPE_KEY, None) == config.BUILDING_NODE_TYPE]\n link = {}\n if not extension:\n return G, terminal_nodes, link\n old_Graph = self.old_network_graph\n H = nx.Graph(G.subgraph([n for n in G.nodes if n not in old_Graph.nodes]))\n H_connected_components = list(nx.connected_components(H))\n old_junctions = [n for n, d in old_Graph.nodes(data=True) if d['nodetype'] == 'junction']\n # Remove the old buildings from the terminal nodes list\n for node in [n for n in terminal_nodes if n in old_Graph.nodes]:\n terminal_nodes.remove(node)\n # Building the Graph on which we will use the heuristic\n for node in old_junctions:\n neighbors = [n for n in G.neighbors(node) if n in H.nodes]\n for cc in H_connected_components:\n sub_neighbors = [n for n in neighbors if n in cc]\n if len(sub_neighbors) == 0:\n continue\n dist, closest_neighbor = min([[G.edges[node, n]['cost'], n] for n in sub_neighbors], key=lambda t: t[0])\n if closest_neighbor not in link:\n link[closest_neighbor] = [node, dist]\n continue\n if dist < link[closest_neighbor][1]:\n link[closest_neighbor] = [node, dist]\n # Add a node corresponding to the old Graph and connected with the selected neighbors\n terminal_nodes.append('OldNetworkNode')\n for n in link:\n H.add_edge('OldNetworkNode', n, cost=link[n][1])\n G = H.copy()\n return G, terminal_nodes, link", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select 
sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def bc_train_nvidia():\n\timg_rows,img_cols = 64,64\n\tinput_shape = (img_rows,img_cols,3)\n\n\t# the model\t\n\tmodel = bc_nvidia_model(input_shape = input_shape)\n\n\t\n\timg_dim = (img_rows,img_cols)\n\n\t# reading the drivelog\t\n\tcsv_data = pd.read_csv(data_path+csv_path,usecols=[\"center\",\"left\",\"right\",\"steering\"])\n\n\tthreshold = 1\n\tbatch_size = 240\n\tepochs = 6\n\tyvals = []\n\n\tfor i in range(epochs):\n\t\tgen = generate_data_train(data_path,csv_data,img_dim,batch_size,threshold,yvals)\n\t\t\n\t\tmodel.fit_generator(gen, samples_per_epoch = 24000, nb_epoch = 1, verbose = 1)\n\n\t\t# thresholding against values close to 0 to balance the data\n\t\tthreshold = 1/(i+1)\n\t\n\t# serialize model to JSON\n\tmodel_json = model.to_json()\n\twith open(\"model.json\", \"w\") as json_file:\n\t json_file.write(model_json)\n\t# serialize weights to HDF5\n\tmodel.save_weights(\"model.h5\")\n\twith open(\"s_angles\",\"wb\") as y_file:\n\t\tpickle.dump(yvals,y_file)\n\treturn", "def compute_splits(self, G, nw_name='test', train_frac=0.51, split_alg='spanning_tree', owa=True, fe_ratio=1,\n split_id=0, verbose=False):\n # Compute train/test split\n if split_alg == 'random':\n tr_E, te_E = stt.rand_split_train_test(G, train_frac)\n train_E, test_E, G, mp = pp.relabel_nodes(tr_E, te_E, G.is_directed())\n elif split_alg == 'naive':\n train_E, test_E = stt.naive_split_train_test(G, train_frac)\n elif split_alg == 'spanning_tree':\n train_E, test_E = stt.split_train_test(G, train_frac)\n elif split_alg == 'fast':\n train_E, test_E = 
stt.quick_split(G, train_frac)\n train_E_false, test_E_false = stt.quick_nonedges(G, train_frac, fe_ratio)\n elif split_alg == 'timestamp':\n train_E, test_E, G = stt.timestamp_split(G, train_frac)\n train_E = set(zip(train_E[:, 0], train_E[:, 1]))\n test_E = set(zip(test_E[:, 0], test_E[:, 1]))\n else:\n raise ValueError('Split alg. {} unknown!'.format(split_alg))\n\n # Compute non-edges\n if split_alg != 'fast':\n num_fe_train = len(train_E) * fe_ratio\n num_fe_test = len(test_E) * fe_ratio\n if owa:\n train_E_false, test_E_false = stt.generate_false_edges_owa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n else:\n train_E_false, test_E_false = stt.generate_false_edges_cwa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n\n # Set class attributes to new values\n self.set_splits(train_E, train_E_false, test_E, test_E_false, directed=G.is_directed(), nw_name=nw_name,\n split_id=split_id, split_alg=split_alg, owa=owa, verbose=verbose)\n\n return train_E, train_E_false, test_E, test_E_false", "def find_best_classifier(x_train, x_test, y_train, y_test):\n max_depth, _ = find_best_parameters(\n 'max_depth', list(range(1, 30)),\n x_train, x_test, y_train, y_test)\n print(\"Best max_depth t: \", max_depth)\n min_samples_split, _ = find_best_parameters(\n 'min_samples_split', list(range(2, 400)),\n x_train, x_test, y_train, y_test)\n min_samples_split = int(min_samples_split)\n print(\"Best min samples split: \", min_samples_split)\n min_samples_leaf, _ = find_best_parameters(\n 'min_samples_leaf', list(range(2, 200)),\n x_train, x_test, y_train, y_test)\n min_samples_leaf = int(min_samples_leaf)\n print(\"Best sample leaf: \", min_samples_leaf)\n max_leaf_nodes, _ = find_best_parameters(\n 'max_leaf_nodes', list(range(2, 150)),\n x_train, x_test, y_train, y_test)\n max_leaf_nodes = int(max_leaf_nodes)\n print(\"Best max leaf nodes split: \", max_leaf_nodes)\n min_impurity_decrease, _ = find_best_parameters(\n 'min_impurity_decrease', np.arange(0.0005, 0.1, 0.0005),\n x_train, x_test, y_train, y_test)\n print(\"Best min impurity decrease: \", min_impurity_decrease)\n clf = DecisionTreeClassifier(\n min_impurity_decrease=min_impurity_decrease,\n max_depth=max_depth,\n min_samples_leaf=min_samples_leaf,\n max_leaf_nodes=max_leaf_nodes,\n min_samples_split=min_samples_split,\n random_state=0)\n clf = clf.fit(x_train, y_train)\n return clf", "def sgd_experiment():\n batch_size = 128\n layer_1_hidden_nodes = 80 ## Starting small so my computer can keep up with the ram requirements of LEEA :)\n\n (train_dataset, train_labels), (valid_dataset, valid_labels), (test_dataset, test_labels) = get_mnist()\n\n graph = tf.Graph()\n with graph.as_default():\n ## Data variables.\n tf_train_dataset = tf.placeholder(tf.float32,\n shape=(batch_size, image_size * image_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n ## Weights describing single layer.\n weights1 = tf.Variable(\n tf.truncated_normal([image_size * image_size, layer_1_hidden_nodes])\n )\n biases1 = tf.Variable(tf.zeros([layer_1_hidden_nodes]))\n weights2 = tf.Variable(\n tf.truncated_normal([layer_1_hidden_nodes, num_labels])\n )\n biases2 = tf.Variable(tf.zeros([num_labels]))\n\n ## Training variables.\n lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n logits = tf.matmul(lay1_train, weights2) + biases2\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits=logits, 
labels=tf_train_labels)\n )\n\n optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n\n train_prediction = tf.nn.softmax(logits)\n lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)\n\n num_steps = 3001\n\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n\n batch_data = train_dataset[offset:(offset + batch_size), :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n\n feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}\n\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n\n if (step % 250) == 0:\n print(\"Minibatch loss at step %d: %f\" % (step, l))\n print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n print(\"Validation accuracy: %.1f%%\" % accuracy(\n valid_prediction.eval(), valid_labels)\n )\n\n print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))", "def SOM(args):\n\n # Obtain the normalized set of cities (w/ coord in [0,1])\n cities = pd.read_csv(Path(args.data_dir) / 'data1.csv')\n\n iteration = args.iteration\n learning_rate = args.learning_rate\n decay = args.decay\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir_p()\n\n cities_nm = cities.copy()\n\n cities_nm[['x', 'y']] = normalize(cities_nm[['x', 'y']])\n cities_nm.to_csv(out_dir/'cities_nm.csv')\n cities.to_csv(out_dir/'cities.csv')\n\n\n depot = cities_nm.query('city==0')[['x','y']].to_numpy()\n # The population size is 8 times the number of cities\n #n = cities_cp.shape[0] * 2# a single route's neurons\n n=100\n # Generate an adequate network of neurons:\n #network = generate_network(n)\n neuron_chains =init_neurons(size=n,depot=depot)\n print('--> Network of {} neurons created. 
Starting the iterations:'.format(n))\n best_routes=np.array([0])\n\n #save\n losses_sum_log={}#每个循环losses_sum值\n min_losses_sum_log = {}##保存最小值的路径losses\n min_losses_log={}#存储最好情况下四条路径的距离值\n min_routes_log={}\n best_id=0\n min_losses_sum=0\n\n for i in tqdm(range(iteration)):\n if not i % args.neuro_plot_freq:\n print('\\t> Iteration {}/{}'.format(i, iteration), end=\"\\r\")\n # Choose a random city\n sample = cities_nm.sample(1)\n if int(sample['city']) in args.depot_idxs:\n continue\n city = sample[['x', 'y']].values#随机抽样 random sampling\n group_idx,winner_idx = select_closest_gpid(neuron_chains, city)\n\n # Generate a filter that applies changes to the winner's gaussian\n gaussian = get_neighborhood(center=winner_idx, radix=n//10, domain=neuron_chains[0].shape[0])\n # Update the network's weights (closer to the city)\n neuron_chains[group_idx] += gaussian[:,np.newaxis] * learning_rate * (city - neuron_chains[group_idx])\n # Decay the variables\n learning_rate = learning_rate * decay\n n = n * decay\n\n\n if i % args.evaluate_freq==0:\n cities_od = rebuild_cities(cities_nm,neuron_chains,args.num_depots)\n cities_od[['x','y']] =cities.reindex(cities_od['city'])[['x','y']]\n losses = routes_distances(cities_od)\n losses_sum = sum(losses)\n losses_sum_log[i] = losses_sum\n\n if min_losses_sum == 0 or min_losses_sum > losses_sum:\n min_losses_sum = losses_sum\n best_id = i\n routes = get_routes(cities_od)\n routes = [list(item.astype(np.float64)) for item in routes]\n min_routes_log[i] = routes\n\n min_losses_sum_log[i] = losses_sum\n min_losses_log[i] = losses\n cities_od.to_csv(out_dir/'data_out_{:04d}.csv'.format(i))\n save_neuron_chains(neuron_chains,out_dir/\"neuron_chains_{:04d}.npy\".format(i))\n\n #end for\n\n # Check if any parameter has completely decayed.\n if n < 1:\n print('Radius has completely decayed, finishing execution',\n 'at {} iterations'.format(i))\n break\n if learning_rate < 0.001:\n print('Learning rate has completely decayed, finishing execution',\n 'at {} iterations'.format(i))\n break\n\n\n print('Completed {} iterations.'.format(iteration))\n\n results = {}\n\n results['losses_sum_log']=losses_sum_log\n results['best_id'] = best_id\n\n results['min_losses_sum_log']=min_losses_sum_log\n results['min_losses_log']=min_losses_log\n results['min_routes_log'] = min_routes_log\n\n\n p = Path(out_dir/'results.json')\n with open(p, 'w') as fp:\n json.dump(results, fp)\n print('ok')\n\n\n return results", "def tuned_for_ec():\n # TODO(theosanderson): update these to true SOTA values\n hparams = contrib_training.HParams()\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('batch_size', 34)\n hparams.add_hparam('dilation_rate', 5)\n hparams.add_hparam('filters', 411)\n hparams.add_hparam('first_dilated_layer', 1) # This is 0-indexed\n hparams.add_hparam('kernel_size', 7)\n hparams.add_hparam('num_layers', 5)\n hparams.add_hparam('pooling', 'mean')\n hparams.add_hparam('resnet_bottleneck_factor', 0.88152)\n hparams.add_hparam('lr_decay_rate', 0.9977)\n hparams.add_hparam('learning_rate', 0.00028748)\n hparams.add_hparam('decision_threshold', 0.3746)\n hparams.add_hparam('denominator_power', 0.88)\n\n hparams.add_hparam('train_steps', 650000)\n return hparams", "def classify_eeg(eeg,srate):\r\n bin_size_sec = 30\r\n bin_size_samp = bin_size_sec*srate\r\n t = 0\r\n classified = np.zeros(len(eeg)/bin_size_samp)\r\n while t + bin_size_samp < len(eeg):\r\n classified[t/bin_size_samp] = 
classify_epoch(eeg[range(t,t+bin_size_samp)],srate)\r\n t = t + bin_size_samp\r\n return classified", "def classify_eeg(eeg,srate):\r\n bin_size_sec = 30\r\n bin_size_samp = bin_size_sec*srate\r\n t = 0\r\n classified = np.zeros(len(eeg)/bin_size_samp)\r\n while t + bin_size_samp < len(eeg):\r\n classified[t/bin_size_samp] = classify_epoch(eeg[range(t,t+bin_size_samp)],srate)\r\n t = t + bin_size_samp\r\n return classified", "def TST_C2ST(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST,device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(pred_C2ST[:N1].type(torch.FloatTensor).mean() - pred_C2ST[N1:].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(pred_C2ST[ind_X].type(torch.FloatTensor).mean() - pred_C2ST[ind_Y].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n # if STAT.item() < threshold_lower:\r\n # h = 1\r\n return h, threshold, STAT", "def main(keep_best_count, mutation_factor, rounds, target, stagnate):\n ways = [range(len(DISTANCES))]\n result = {'round':0,'cost':None}\n for i in range(rounds):\n ways = mutate(ways,mutation_factor)\n best = []\n for way in ways:\n best.append((rate(way),way))\n best.sort()\n if VERBOSITY:\n for way in best:\n print way\n print \"Round %d best way is %s\" % (i+1, best[0][0])\n # break if we hit the target\n if best[0][0] <= target:\n print \"Hit Target\"\n break\n # break if we stagnate to long\n if result['cost'] is None or best[0][0] <result['cost']:\n result['cost'] = best[0][0]\n result['round'] = i+1\n elif result['round'] + stagnate <= i+1:\n print \"Stagnate to long\"\n break\n ways = list(b[1] for b in best[0:keep_best_count])\n print \"\"\n print \"best found order with cost=%d\" % best[0][0]\n print ' '.join(list(NAMES[i] for i in best[0][1]))\n print \"\"", "def _evaluate_performance__static_winners(self):\n # | - _evaluate_performance__\n\n # | - class attributes #################################################\n AL = self\n al_gen = self.al_gen\n verbose = self.verbose\n seed_ids = self.seed_ids\n acquisition_bin = self.acquisition_bin\n completed_ids = self.completed_ids\n CandidateSpace = self.CandidateSpace\n RegressionModel = self.RegressionModel\n DuplicateFinder = self.DuplicateFinder\n al_gen_dict = self.al_gen_dict\n\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n\n index_acq_gen_dict = self.index_acq_gen_dict\n #__| #################################################################\n\n # #####################################################################\n mode = \"lowest_N\" # 'lowest_N' or 'lowest_perc'\n\n N_ids = 10\n lowest_perc = 5\n\n # Number of consecutive generations that the Nth best systems must\n # remain static\n M_gens = 3\n # #####################################################################\n\n if mode == \"lowest_perc\":\n num_candidates = CandidateSpace.FingerPrints.df_pre.shape[0]\n N_ids = int(num_candidates * (lowest_perc * 0.01))\n\n gen_keys = 
list(AL.al_gen_dict.keys())\n\n if len(gen_keys) > M_gens:\n latest_M_keys = gen_keys[-(M_gens + 1):]\n last_gen_key = gen_keys[-1]\n\n al_gen_dict_subset_i = dict(zip(\n latest_M_keys,\n [AL.al_gen_dict.get(i, None) for i in latest_M_keys]))\n\n indices_list = []\n iterator = enumerate(al_gen_dict_subset_i.items())\n for i_cnt, (gen_i, AL_i) in iterator:\n model_i = AL_i.model\n\n model_i = AL.add_main_Y_to_model(\n model_i, plot_dft_instead_of_pred=True)\n model_i = model_i[(model_i[\"duplicate\"] == False)]\n model_i = model_i.sort_values(\"Y_main\")\n\n indices_i = model_i.index.tolist()\n\n indices_list.append(indices_i)\n\n if i_cnt >= M_gens:\n indices_i = indices_list[i_cnt][0:N_ids]\n ids_static_list = []\n for j in range(M_gens):\n indices_j = indices_list[i_cnt - (j + 1)][0:N_ids]\n ids_static = indices_j == indices_i\n ids_static_list.append(ids_static)\n\n ids_are_static = all(ids_static_list)\n\n self.performance__static_winners[last_gen_key] = ids_are_static\n #__|", "def do_graph( self, db_device_adapters, ts_begin, ts_end ):\n# self.graph_4( db_device_adapter, ts_begin, ts_end, min_points = 10 )\n# self.graph_many( db_device_adapters[0], ts_begin, ts_end, min_points = 10 )\n self.graph_many_really( db_device_adapters, ts_begin, ts_end, min_points = 10 )", "def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GALE_ProgressTrack\"\r\n outPop = \"GH_GALE_PopulationOut\"\r\n bitLength = 1 # This implementation is not yet set up to handle other rule representations, or bit encoding lengths.\r\n CVpartitions = 10\r\n trackCycles = 1\r\n \r\n iterInput = '5.10.20' \r\n xdim = 10\r\n ydim = 10\r\n dist = 2\r\n wild = 0.75\r\n prune = 1\r\n \r\n #Figure out the iteration stops for evaluation, and the max iterations.\r\n iterList = iterInput.split('.')\r\n for i in range(len(iterList)):\r\n iterList[i] = int(iterList[i])\r\n lastIter = iterList[len(iterList)-1] \r\n\r\n #Sets up up algorithm to be run.\r\n GALEConstants.setConstants(prune, wild)\r\n e = GALE_Environment(trainData,testData,bitLength)\r\n sampleSize = e.getNrSamples()\r\n gale = GALE(e, outProg, outPop, bitLength, CVpartitions, graphPerformance, xdim, ydim, dist)\r\n \r\n #Set some GALE parameters.\r\n if trackCycles == 'Default':\r\n gale.setTrackingIterations(sampleSize)\r\n else:\r\n gale.setTrackingIterations(trackCycles) \r\n gale.setNumberOfTrials(lastIter, iterList) \r\n \r\n #Run the GALE Algorithm \r\n gale.runGALE()", "def tune(runner, kernel_options, device_options, tuning_options):\n\n #Bayesian Optimization strategy seems to need some hyper parameter tuning to\n #become better than random sampling for auto-tuning GPU kernels.\n\n #alpha, normalize_y, and n_restarts_optimizer are options to\n #https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html\n #defaults used by Baysian Optimization are:\n # alpha=1e-6, #1e-3 recommended for very noisy or discrete search spaces\n # n_restarts_optimizer=5,\n # normalize_y=True,\n\n #several exploration friendly settings are: (default is acq=\"ucb\", kappa=2.576)\n # acq=\"poi\", xi=1e-1\n # acq=\"ei\", xi=1e-1\n # acq=\"ucb\", kappa=10\n\n if not bayes_opt_present:\n raise ImportError(\"Error: optional dependency Bayesian Optimization not installed\")\n\n #defaults as used by Bayesian Optimization Python package\n acq = 
tuning_options.strategy_options.get(\"method\", \"poi\")\n kappa = tuning_options.strategy_options.get(\"kappa\", 2.576)\n xi = tuning_options.strategy_options.get(\"xi\", 0.0)\n init_points = tuning_options.strategy_options.get(\"popsize\", 5)\n n_iter = tuning_options.strategy_options.get(\"maxiter\", 25)\n\n tuning_options[\"scaling\"] = True\n\n results = []\n\n #function to pass to the optimizer\n def func(**kwargs):\n args = [kwargs[key] for key in tuning_options.tune_params.keys()]\n return -1.0 * minimize._cost_func(args, kernel_options, tuning_options, runner, results)\n\n bounds, _, _ = minimize.get_bounds_x0_eps(tuning_options)\n pbounds = OrderedDict(zip(tuning_options.tune_params.keys(),bounds))\n\n verbose=0\n if tuning_options.verbose:\n verbose=2\n\n optimizer = BayesianOptimization(f=func, pbounds=pbounds, verbose=verbose)\n\n optimizer.maximize(init_points=init_points, n_iter=n_iter, acq=acq, kappa=kappa, xi=xi)\n\n if tuning_options.verbose:\n print(optimizer.max)\n\n return results, runner.dev.get_environment()", "def test_random_forest_n_estimators_parameter(params, X_train, X_test, y_train, y_test):" ]
[ "0.5493443", "0.54361165", "0.5433714", "0.54278624", "0.5415707", "0.5382359", "0.53555316", "0.5353432", "0.53351855", "0.5288633", "0.5276035", "0.5257488", "0.52392656", "0.5214441", "0.51326734", "0.5129553", "0.5117215", "0.51140994", "0.5112784", "0.5106431", "0.509864", "0.5094039", "0.5090478", "0.5088907", "0.50884414", "0.5084534", "0.5083835", "0.50821716", "0.5081785", "0.50665176", "0.5065116", "0.50487", "0.5029696", "0.50273275", "0.5016712", "0.501427", "0.5008597", "0.5001297", "0.5000598", "0.49938217", "0.49930695", "0.49858305", "0.4981712", "0.497854", "0.4975491", "0.49675268", "0.4964524", "0.49613932", "0.49605668", "0.49569398", "0.49468842", "0.49435228", "0.49390176", "0.49380893", "0.49302542", "0.49163097", "0.49124697", "0.49103028", "0.49085203", "0.49081248", "0.49005166", "0.49005166", "0.48996222", "0.4890098", "0.4889512", "0.4889485", "0.488507", "0.48821223", "0.48779005", "0.48765084", "0.48752442", "0.48745632", "0.48718256", "0.48698953", "0.4868971", "0.48617882", "0.48546863", "0.48530215", "0.4847847", "0.48446873", "0.48442867", "0.48442507", "0.4840589", "0.48394337", "0.4837517", "0.48357052", "0.48312286", "0.48300076", "0.48251614", "0.48230976", "0.48212132", "0.48196954", "0.48181027", "0.48181027", "0.48105407", "0.4810274", "0.4802319", "0.48000985", "0.4798749", "0.47970846", "0.47963336" ]
0.0
-1
Downsamples spike data to include only the top 1% of frames
def downsample_spikes(S, thres=150, verbose=1):
    sum_S = np.sum(S, axis=0)
    if verbose > 0:
        print(
            'Downsampling spike data to {} frames using threshold {}'
            .format(np.sum(np.greater(sum_S, thres)), thres))

    return S[:, np.greater(sum_S, thres)]
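A minimal usage sketch of the downsample_spikes document above, assuming S is a NumPy array of shape (neurons, frames) holding per-frame spike counts; the synthetic Poisson data and the threshold of 60 are illustrative assumptions, not values taken from any record in this set.

import numpy as np

def downsample_spikes(S, thres=150, verbose=1):
    # Keep only frames whose summed spike count across neurons exceeds `thres`.
    sum_S = np.sum(S, axis=0)
    if verbose > 0:
        print(
            'Downsampling spike data to {} frames using threshold {}'
            .format(np.sum(np.greater(sum_S, thres)), thres))
    return S[:, np.greater(sum_S, thres)]

# Illustrative call on synthetic data: 50 neurons x 1000 frames of Poisson spike counts.
rng = np.random.default_rng(0)
S = rng.poisson(lam=1.0, size=(50, 1000))
S_down = downsample_spikes(S, thres=60)  # threshold chosen to keep only high-activity frames
print(S_down.shape)  # (50, n_kept_frames)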
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsamples) == self._number_of_subsamples:\n raise WaveformError(\n 'Number of subsample is %r, while %r is expected' % (\n len(self._subsamples), self._number_of_subsamples))\n logging.debug('down-samples: %r', self._subsamples)", "def downsample_sam(self, factor):", "def downsample_data(dataset):\n loss = dataset.loc[dataset[TARGET] == 'loss']\n good_gain = dataset.loc[dataset[TARGET] == 'good_gain']\n \n sample_size = min([loss.shape[0], good_gain.shape[0]])\n loss = loss.sample(n=sample_size, random_state=42)\n good_gain = good_gain.sample(n=sample_size, random_state=42)\n \n frames = [loss, good_gain]\n return shuffle(pd.concat(frames), random_state=0)", "def resample(self):\n pass", "def subbandwidth(self):", "def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor", "def downsample_fluorescence(F, thres=20, verbose=1):\n diff_F = np.diff(F, axis=1)\n sum_F = np.sum(diff_F, axis=0)\n F = F[:,:-1]\n if verbose > 0:\n print(\n 'Downsampling fluorescence data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_F, thres))))\n \n return F[:, np.greater(sum_F, thres)]", "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = 
pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)", "def get_next_sample(self):", "def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out", "def downsample_frame(self, data_frame, rate='5min'):\n if data_frame is pd.DataFrame:\n data_frame.resample(rate, how='mean', closed='right')\n pass", "def downsample(state):\n return state[::2, ::2, :]", "def downsample(data, downsampling, summary=np.sum, allow_trim=False):\n data = np.asarray(data)\n if data.ndim != 2:\n raise ValueError('Data must be 2 dimensional.')\n ny, nx = data.shape\n if not allow_trim and ((nx % downsampling) or (ny % downsampling)):\n raise ValueError('Data shape {0} does not evenly divide downsampling={1} and allow_trim is False.'\n .format((ny, nx), downsampling))\n ny //= downsampling\n nx //= downsampling\n shape = (ny, nx, downsampling, downsampling)\n strides = (downsampling * data.strides[0], downsampling * data.strides[1]) + data.strides\n blocks = np.lib.stride_tricks.as_strided(\n data[:downsampling * ny, :downsampling * nx], shape=shape, strides=strides)\n return summary(blocks, axis=(2, 3))", "def oversampling_experiment():\n model, history = train.train(BATCH_SIZE, EPOCHS, print_model_summary=True,\n oversampling=True)\n evaluate_both(model)\n plotting.plot_metrics(history)", "def downsample_pupil(df, pup_col, time_col, bin_size, method='median'): \r\n \r\n if method not in ['mean','median']:\r\n raise Exception(\"Invalid sampling method. 
Please use 'mean' or 'median'.\")\r\n \r\n # convert the microsecond timestamp to datetime timestamp\r\n df[time_col] = pd.to_datetime(df[time_col], unit = 'ms')\r\n \r\n # resampling on the datetime timestamp\r\n df[pup_col+'_resamp'] = df[pup_col]\r\n resampler = df[[time_col] + [pup_col+'_resamp']].resample(bin_size, on=time_col, loffset='0ms',label='left')\r\n \r\n # decide which method to calculate results\r\n if method == 'median':\r\n resampled_samps = resampler.median()\r\n elif method == 'mean':\r\n resampled_samps = resampler.mean()\r\n \r\n # convert the datetime timestamp back to microsecond timestamp\r\n resampled_samps.index = (resampled_samps.index - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1ms')\r\n \r\n return resampled_samps", "def downsample(self, number):\n for num, ss in enumerate(self.samples):\n self.samples[num], self.extra_kwargs[num] = _downsample(\n ss, number, extra_kwargs=self.extra_kwargs[num]\n )", "def down_sampling(record, down_sampling_factor=16):\n\n if len(record.shape) == 1:\n return record[slice(0, record.shape[0], down_sampling_factor)]\n else:\n row_idx = np.arange(record.shape[0])\n col_idx = np.arange(0, record.shape[1], down_sampling_factor)\n\n return record[np.ix_(row_idx, col_idx)]", "def _compute_quantized_subsamples(self):\n self._down_sample()\n self._quantize()", "def subsample():\n\n nwav = 872\n nrow = 1600\n ncol = 1560\n\n fpath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_binned/nrow1600')\n fnames = ['full_frame_20ms_faster_VNIR_1600.raw',\n 'full_frame_20ms_faster_VNIR_1600_flat.raw']\n\n for fname in fnames:\n print(\"SUBSAMPLE: reading data from {0}\".format(fpath))\n print(\"SUBSAMPLE: {0}\".format(fname))\n data = np.fromfile(os.path.join(fpath,fname)).reshape(nwav,nrow,ncol)\n\n for fac in [2,4,8]:\n trow = '{0:04}'.format(1600/fac)\n opath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_subsample',\n 'nrow'+trow)\n oname = fname.replace('1600',trow)\n\n print(\"SUBSAMPLE: writing subsampled data to {0}\".format(opath))\n print(\"SUBSAMPLE: {0}\".format(oname))\n data[:,::fac,::fac].tofile(open(os.path.join(opath,oname),'wb'))\n\n return", "def downsample(self, number):\n self.samples, self.extra_kwargs = _downsample(\n self.samples, number, extra_kwargs=self.extra_kwargs\n )", "def _subsample_frames(self, video_clip_frames):\n subsampled_frames = []\n current_ix = 0\n step_size = len(video_clip_frames) / float(config.RGB_N_FRAMES)\n for _ in range(config.RGB_N_FRAMES):\n frame = video_clip_frames[int(current_ix)]\n subsampled_frames.append(frame)\n current_ix += step_size\n\n return np.array(subsampled_frames)", "def downsample(time_series,res = '0.2S'):\n\n Nvalues = len(time_series.index)\n samplerate = 1/ ((time_series.timestamp[Nvalues-1] - time_series.timestamp[0]) / Nvalues)\n timestart = dt.datetime(1970, 1, 1, 0, 0, 0, 0) #dt.datetime.now()\n start = pd.Timestamp(timestart)\n end = pd.Timestamp(timestart + dt.timedelta(seconds=Nvalues/samplerate))\n t = np.linspace(start.value, end.value, Nvalues)\n t = pd.to_datetime(t)\n time_series['time'] = t\n time_series = time_series.resample(res,on='time').mean() # downsample to 0.2 second intervals\n time_series.index.name = 'time'\n time_series.reset_index(inplace=True)\n return time_series", "def test_downsample_raises_error_greater_output_fps():\n with pytest.raises(\n ValueError,\n match=r'Output FPS can\\'t be greater than input FPS'):\n downsample(np.arange(10), 1, 5)", "def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = 
range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)", "def subsample(y, limit=256, factor=2):\n if len(y) > limit:\n return y[::factor].reset_index(drop=True)\n return y", "def upsample(x):\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")", "def _fract_whole_data(self) :\n if self._fract_data == -1 :\n pass\n else :\n rows = self._df.shape[0]\n fract_rows = int(rows*self._fract_data)\n self._df = self._df.sample(fract_rows).copy()", "def subsample(df, freq=2):\n df = df.iloc[::freq, :]\n\n return df", "def downsampling(x_train, y_train, random_state=42):\n sampling = pd.concat([x_train, y_train], axis=1)\n big = sampling[y_train == y_train.value_counts().index[0]]\n small = sampling[y_train == y_train.value_counts().index[1]]\n\n downsampled = resample(big,\n replace=False,\n n_samples=len(small),\n random_state=random_state)\n downsampled = pd.concat([downsampled, small])\n x_train_bal = downsampled[downsampled.columns.values[:-1]]\n y_train_bal = downsampled[downsampled.columns.values[-1]]\n\n del sampling, big, small, downsampled\n return x_train_bal, y_train_bal", "def sample_generator(self, data, index):\r\n out = []\r\n frames = data[\"video\"]\r\n for speed_idx, speed in enumerate(self.speed_set):\r\n # generate all the samples according to the speed set\r\n num_input_frames, h, w, c = frames.shape\r\n frame_idx = random.randint(0, num_input_frames-1)\r\n selected_frame = frames[frame_idx] # H, W, C\r\n\r\n # standardize the frame size\r\n if self.cfg.PRETRAIN.FRAME_SIZE_STANDARDIZE_ENABLE: \r\n selected_frame = self.frame_size_standardize(selected_frame)\r\n \r\n # generate the sample index \r\n h, w, c = selected_frame.shape\r\n speed_x, speed_y = speed\r\n start_x, end_x = self.get_crop_params(speed_x/(self.num_speeds//2), w)\r\n start_y, end_y = self.get_crop_params(speed_y/(self.num_speeds//2), h)\r\n intermediate_x = (torch.linspace(start_x, end_x, self.num_frames).long()).clamp_(0, w-self.crop_size)\r\n intermediate_y = (torch.linspace(start_y, end_y, self.num_frames).long()).clamp_(0, h-self.crop_size)\r\n \r\n frames_out = torch.empty(\r\n self.num_frames, self.crop_size, self.crop_size, c, device=frames.device, dtype=frames.dtype\r\n )\r\n\r\n for t in range(self.num_frames):\r\n frames_out[t] = selected_frame[\r\n intermediate_y[t]:intermediate_y[t]+self.crop_size, intermediate_x[t]:intermediate_x[t]+self.crop_size, :\r\n ]\r\n\r\n # performs augmentation on the generated image sequence\r\n if self.transform is not None:\r\n frames_out = self.transform(frames_out)\r\n \r\n # applies static mask\r\n if self.static_mask_enable:\r\n frames_out = self.static_mask(frames_out)\r\n out.append(frames_out)\r\n out = torch.stack(out)\r\n data[\"video\"] = out\r\n return data", "def split_into_frames(filename_raw, thr_var_per_event=5e-4, downsampling_factor=2, disable_display=False,\n filename_output_video=None):\n\n assert downsampling_factor == int(downsampling_factor), \"Error: downsampling_factor must be an integer\"\n assert downsampling_factor >= 0, \"Error: downsampling_factor must be >= 0\"\n\n mv_adaptive_rate_iterator = AdaptiveRateEventsIterator(input_path=filename_raw,\n thr_var_per_event=thr_var_per_event,\n downsampling_factor=downsampling_factor)\n\n height, width = mv_adaptive_rate_iterator.get_size()\n\n if filename_output_video == None:\n video_process = None\n else:\n assert not 
os.path.exists(filename_output_video)\n video_process = FFmpegWriter(filename_output_video)\n\n if video_process or not disable_display:\n img_bgr = np.zeros((height, width, 3), dtype=np.uint8)\n\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n\n for events in mv_adaptive_rate_iterator:\n assert events.size > 0\n start_ts = events[0][\"t\"]\n end_ts = events[-1][\"t\"]\n print(\"frame: {} -> {} delta_t: {} fps: {} nb_ev: {}\".format(start_ts, end_ts,\n end_ts - start_ts,\n 1e6 / (end_ts - start_ts),\n events.size))\n if video_process or not disable_display:\n img = events_to_diff_image(events, sensor_size=(height, width))\n img_bgr[...] = 0\n img_bgr[img < 0, 0] = 255\n img_bgr[img > 0, 1] = 255\n\n chunk_start_ts = events[0][\"t\"]\n chunk_end_ts = events[-1][\"t\"]\n delta_t_frame = chunk_end_ts - chunk_start_ts + 1\n frame_txt = \"ts: {} -> {} delta_t: {} fps: {} (nb_ev): {}\".format(chunk_start_ts, chunk_end_ts,\n delta_t_frame,\n int(1.e6/delta_t_frame),\n events.size)\n img_bgr[20:45, ...] = 0\n cv2.putText(img_bgr,\n frame_txt,\n (int(0.05 * width), 40),\n cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 200, 100))\n\n if video_process:\n video_process.writeFrame(img_bgr.astype(np.uint8)[..., ::-1])\n if not disable_display:\n cv2.imshow(\"img\", img_bgr)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n if video_process:\n video_process.close()\n if not disable_display:\n cv2.destroyAllWindows()", "def _set_number_of_subsamples(self, number_of_subsamples):\n self._number_of_subsamples = number_of_subsamples\n self._compute_down_sample_factor()", "def _my_downsample(signal, timestamps, sr, desired_downsample_rate):\n\n # figure out our decimate factor. Must be int, so we'll try to get as close\n # as possible to the desired rate. Will not be exactly.\n ts_diff_sec = np.mean(np.diff(timestamps)) / 1e6\n dec_factor = np.floor(1. / (desired_downsample_rate * ts_diff_sec))\n\n # new sampling rate\n ts_diff_down = dec_factor * ts_diff_sec\n new_sr = 1. 
/ ts_diff_down\n\n # apply a low pass filter before decimating\n low_pass_freq = new_sr / 2.\n [b, a] = butter(4, low_pass_freq / (sr / 2), 'lowpass')\n signals_low_pass = filtfilt(b, a, signal, axis=0)\n\n # now decimate\n inds = np.arange(0, len(signals_low_pass), dec_factor, dtype=int)\n new_sigals = signals_low_pass[inds]\n new_ts = timestamps[inds]\n return new_sigals, new_ts, new_sr", "def bottom_block(downsample_f8, bottleneck, hg_id, num_channels):\n hg_name = 'hg' + str(hg_id)\n\n downsample_f8_short = bottleneck(downsample_f8, num_channels, hg_name+\"_downsample_f8_short\")\n\n _x = bottleneck(downsample_f8, num_channels, hg_name+\"_downsample_f8_1\")\n _x = bottleneck(_x, num_channels, hg_name+\"_downsample_f8_2\")\n _x = bottleneck(_x, num_channels, hg_name+\"_downsample_f8_3\")\n\n upsample_f8 = Add()([_x, downsample_f8_short])\n\n return upsample_f8", "def subsampling(dat: pd.DataFrame):\n if dat.shape[0] > 10000:\n return dat.sample(n=10000, random_state=1).reset_index(drop=True)\n else:\n return dat", "def downsample(X, y, seed):\n rus = RandomUnderSampler(random_state=seed)\n X_resampled, y_resampled = rus.fit_resample(X, y)\n return X_resampled, y_resampled", "def test_resample_weather_15_min_downsample(self):\n actual = timeseries.resample_timeseries(ts=self.weather_data,\n interval_str='15Min')\n\n pd.testing.assert_frame_equal(actual, self.expected_data)", "def make_downsample_filt_tensor(SR=16000, ENV_SR=200, WINDOW_SIZE=1001, pycoch_downsamp=False):\n DOWNSAMPLE = SR/ENV_SR\n if not pycoch_downsamp: \n downsample_filter_times = np.arange(-WINDOW_SIZE/2,int(WINDOW_SIZE/2))\n downsample_filter_response_orig = np.sinc(downsample_filter_times/DOWNSAMPLE)/DOWNSAMPLE\n downsample_filter_window = signal.kaiser(WINDOW_SIZE, 5)\n downsample_filter_response = downsample_filter_window * downsample_filter_response_orig\n else: \n max_rate = DOWNSAMPLE\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for our sinc-like function\n if max_rate!=1: \n downsample_filter_response = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0))\n else: # just in case we aren't downsampling -- I think this should work? 
\n downsample_filter_response = zeros(2 * half_len + 1)\n downsample_filter_response[half_len + 1] = 1\n \n # Zero-pad our filter to put the output samples at the center\n # n_pre_pad = int((DOWNSAMPLE - half_len % DOWNSAMPLE))\n # n_post_pad = 0\n # n_pre_remove = (half_len + n_pre_pad) // DOWNSAMPLE\n # We should rarely need to do this given our filter lengths...\n # while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],\n # up, down) < n_out + n_pre_remove:\n # n_post_pad += 1\n # downsample_filter_response = np.concatenate((np.zeros(n_pre_pad), downsample_filter_response, np.zeros(n_post_pad)))\n \n downsample_filt_tensor = tf.constant(downsample_filter_response, tf.float32)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 0)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 2)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 3)\n\n return downsample_filt_tensor", "def getUnscaledSamples(self, **kwargs) -> TimeData:\n # initialise chans, startSample and endSample with the whole dataset\n options = self.parseGetDataKeywords(kwargs)\n\n # get the files to read and the samples to take from them, in the correct order\n dataFilesToRead, samplesToRead, scalings = self.getDataFilesForSamples(\n options[\"startSample\"], options[\"endSample\"]\n )\n numSamples = options[\"endSample\"] - options[\"startSample\"] + 1\n # set up the dictionary to hold the data\n data = {}\n for chan in options[\"chans\"]:\n data[chan] = np.zeros(shape=(numSamples), dtype=self.dtype)\n\n # loop through chans and get data\n sampleCounter = 0\n for dFile, sToRead, scalar in zip(dataFilesToRead, samplesToRead, scalings):\n # get samples - this is inclusive\n dSamples = sToRead[1] - sToRead[0] + 1\n # spam files always record 5 channels\n dSamplesRead = dSamples * self.recChannels[dFile]\n # read the data\n byteOff = (\n self.dataByteOffset[dFile]\n + sToRead[0] * self.recChannels[dFile] * self.dataByteSize\n )\n dFilePath = os.path.join(self.dataPath, dFile)\n dataRead = np.memmap(\n dFilePath,\n dtype=self.dtype,\n mode=\"r\",\n offset=byteOff,\n shape=(dSamplesRead),\n )\n # now need to unpack this\n for chan in options[\"chans\"]:\n # check to make sure channel exists\n self.checkChan(chan)\n # get the channel index - the chanIndex should give the right order in the data file\n # as it is the same order as in the header file\n chanIndex = self.chanMap[chan]\n # use the range sampleCounter -> sampleCounter + dSamples, because this actually means sampleCounter + dSamples - 1 as python ranges are not inclusive of the end value\n # scale by the lsb scalar here - note that these can be different for each file in the run\n data[chan][sampleCounter : sampleCounter + dSamples] = (\n dataRead[chanIndex : dSamplesRead : self.recChannels[dFile]]\n * scalar[chan]\n )\n # increment sample counter\n sampleCounter = sampleCounter + dSamples # get ready for the next data read\n\n # return data\n startTime, stopTime = self.sample2time(\n options[\"startSample\"], options[\"endSample\"]\n )\n comments = []\n comments.append(\n \"Unscaled data {} to {} read in from measurement {}, samples {} to {}\".format(\n startTime,\n stopTime,\n self.dataPath,\n options[\"startSample\"],\n options[\"endSample\"],\n )\n )\n comments.append(\"Data read from {} files in total\".format(len(dataFilesToRead)))\n comments.append(\n \"Data scaled to mV for all channels using scalings in header files\"\n )\n comments.append(\"Sampling frequency {}\".format(self.getSampleFreq()))\n return 
TimeData(\n sampleFreq=self.getSampleFreq(),\n startTime=startTime,\n stopTime=stopTime,\n data=data,\n comments=comments,\n )", "def samples(self):\n pass", "def calcFrameRate(self):\n\n tot = 0\n count = 0\n for session in self.sessions:\n for sample in session.samples:\n if not sample.isLoading:\n tot += sample.fps\n count += 1\n if count:\n self.avgFps = tot / count\n self.lowFps = (self.avgFps < 10)\n self.highFps = (self.avgFps > 25)", "def _downsample(x):\n return nn.AvgPool2d(kernel_size=2)(x)", "def downsample_data(\n downsampled_frac,\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n):\n\n np.random.seed(2023)\n tmp_tr_id = np.random.choice(\n len(fore_train_op),\n int(len(fore_train_op) * downsampled_frac),\n replace=False,\n )\n np.random.seed(2023)\n tmp_val_id = np.random.choice(\n len(fore_valid_op),\n int(len(fore_valid_op) * downsampled_frac),\n replace=False,\n )\n\n fore_train_ip = [x[tmp_tr_id] for x in fore_train_ip]\n fore_train_op = fore_train_op[tmp_tr_id]\n fore_valid_ip = [x[tmp_val_id] for x in fore_valid_ip]\n fore_valid_op = fore_valid_op[tmp_val_id]\n\n np.random.seed(2023)\n tmp_tr_id = np.random.choice(\n len(train_op), int(len(train_op) * downsampled_frac), replace=False\n )\n np.random.seed(2023)\n tmp_val_id = np.random.choice(\n len(valid_op), int(len(valid_op) * downsampled_frac), replace=False\n )\n np.random.seed(2023)\n tmp_test_id = np.random.choice(\n len(test_op), int(len(test_op) * downsampled_frac), replace=False\n )\n\n train_ip = [x[tmp_tr_id] for x in train_ip]\n train_op = train_op[tmp_tr_id]\n valid_ip = [x[tmp_val_id] for x in valid_ip]\n valid_op = valid_op[tmp_val_id]\n test_ip = [x[tmp_test_id] for x in test_ip]\n test_op = test_op[tmp_test_id]\n\n return (\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n )", "def downsample_panel(minute_rp, daily_rp, mkt_close):\n\n cur_panel = minute_rp.get_current()\n sids = minute_rp.minor_axis\n day_frame = pd.DataFrame(columns=sids, index=cur_panel.items)\n dt1 = trading.environment.normalize_date(mkt_close)\n dt2 = trading.environment.next_trading_day(mkt_close)\n by_close = functools.partial(get_date, mkt_close, dt1, dt2)\n for item in minute_rp.items:\n frame = cur_panel[item]\n func = get_sample_func(item)\n # group by trading day, using the market close of the current\n # day. 
If events occurred after the last close (yesterday) but\n # before today's close, group them into today.\n dframe = frame.groupby(lambda d: by_close(d)).agg(func)\n for stock in sids:\n day_frame[stock][item] = dframe[stock].ix[dt1]\n # store the frame at midnight instead of the close\n daily_rp.add_frame(dt1, day_frame)", "def reduce_sample_size(data,classes,times=2):\n data=data[range(0,data.shape[0],times)]\n classes=classes[range(0,classes.shape[0],times)]\n return data,classes", "def over_sample(self) -> float:\n return self._over_sample", "def over_sample(self) -> float:\n return self._over_sample", "def spectrum(datapath, run, forcebins = False):\n runpath = datapath + '/' + run\n events = [evnt for evnt in listdir(runpath) if not isfile(join(runpath,evnt))]\n allTraces = []\n total_time = 0\n pulses = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n times = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n #camextratime = 25e-6\n for event in events:\n if int(event)> 3:\n break\n print(event)\n e = sbc.DataHandling.GetSBCEvent.GetEvent(runpath,event)\n if e[\"slowDAQ\"][\"loaded\"]:\n #print(e[\"fastDAQ\"].keys())\n cgate = e[\"fastDAQ\"][\"CAMgate\"]\n #dcam = np.diff(cgate)\n fdt = e['fastDAQ']['time']\n #camOffTimes = np.sort(np.array([fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]))\n \n #camOnTimes = np.sort(np.array([fdt[i] for i in range(len(dcam)) if dcam[i] < 0.5]))\n fddt = fdt[1]-fdt[0]\n tfast = fdt[-1]-fdt[0]\n LED_on = [fdt[i] for i in range(len(cgate)) if cgate[i]<-0.5]\n blockedFraction = ((len(LED_on)*fddt))/tfast\n print(blockedFraction)\n tr = e[\"PMTtraces\"]\n trac = tr[\"traces\"]\n dt = tr[\"dt\"]\n #event_time = (tr['t0_sec'][-1]+tr['t0_frac'][-1]-tr['t0_sec'][0] - tr['t0_frac'][0])[0]\n event_time = (((e[\"slowDAQ\"][\"elapsed_time\"][-1]-e[\"slowDAQ\"][\"elapsed_time\"][0]))*(1-blockedFraction))\n #print(event_time)\n total_time += event_time\n\n #f,axes = plt.subplots(1,5,sharey=True)\n #f.suptitle(runpath+\"/\"+str(event))\n #pmttracetime = e[\"PMTtraces\"][\"t0_sec\"][:,0]+e[\"PMTtraces\"][\"t0_frac\"][:,0]\n #d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)\n #pmtalign = d[\"PMT_trigt0_sec\"]+d[\"PMT_trigt0_frac\"]\n #tracetimes = pmttracetime - pmtalign\n #camoffindex = 0\n #camonindex = 0\n for i in range(len(trac)):\n #print(i)\n \"\"\"\n thistracetime = tracetimes[i]\n \n #nearestcamoff = min(camOffTimes, key=lambda x:abs(x-thistracetime))\n #nearestcamon = min(camOnTimes, key=lambda x:abs(x-thistracetime))\n print(camOffTimes[camoffindex])\n print(thistracetime)\n if thistracetime > camOffTimes[camoffindex]:\n camoffindex += 1\n if thistracetime > camOnTimes[camonindex]:\n camonindex += 1 \n if camoffindex<len(camOffTimes)-1:\n if abs(camOffTimes[camoffindex]-thistracetime)<camextratime:\n print('excluding a trace near a camera off')\n continue\n if camonindex<len(camOnTimes)-1:\n if abs(camOnTimes[camonindex]-thistracetime)<camextratime:\n print('excluding a trace near a camera on')\n continue\n \"\"\"\n trace = np.fabs(trac[i][0])\n if max(trace) == 128:\n trace = stitchTraces(trace,np.fabs(e[\"PMTtraces\"][\"traces\"][i][1]))\n dt_tr = dt[i][0]\n\n # populate dictionaries arrays based on how many pulses there were\n [a,n,totInt,pktimes] = SBC_pulse_integrator_bressler(trace,dt_tr)\n if n == 0:\n number = 'zero'\n allTraces.append(a)\n elif n == 1:\n number = 'one'\n allTraces.append(a)\n times['one'].append(pktimes[0])\n elif n == 2:\n number = 'two'\n allTraces.append(a)\n elif n == 3:\n number 
= 'three'\n allTraces.append(a)\n else:\n number = 'other'\n allTraces.append(a)\n \"\"\"\n #if a != None:\n if isZero:\n if j < 5:\n if isNegative:\n if random() >0:\n print(runpath+\"/\"+str(event)+\" pmt trace \"+str(i))\n tPMT = np.arange(len(trace))*dt_tr\n axes[j].plot(tPMT,trace,lw=3)\n axes[j].set_xlabel(\"time (s)\",fontsize=25)\n axes[j].set_ylabel(\"PMT response (ADC)\",fontsize=25)\n j+=1\n \n \n plt.show\n \"\"\"\n pulses[number].append(a)\n gc.collect()\n \n \n for k in pulses:\n pulses[k] = [x for x in pulses[k] if x != None]\n \n allTraces = [x for x in allTraces if x != None]\n \n plt.figure()\n\n Nbins = int(np.floor(np.sqrt(len(allTraces))))\n allvals, bins, _ = plt.hist(allTraces,Nbins,label='all traces')\n \n areaVals = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n for k in pulses:\n if k != 'other':\n areaVals[k], _, _ = plt.hist(pulses[k],bins,histtype = 'step',\n linewidth = 3,label= k+' hits')\n plt.legend(fontsize=12)\n plt.show() \n #spe_spectrum = areaVals['one']\n \n #def gaussian(x,mu,sigma,amplitude):\n # return amplitude * np.exp(-((x - mu) /(np.sqrt(2)* sigma))**2 )\n \n #params_spe, params_cov_spe = scipy.optimize.curve_fit(gaussian,bins[:len(areaVals['one'])],\n # spe_spectrum,\n # p0=[0.4e8,1e7,40])\n #params_twohits, params_cov_twohits = scipy.optimize.curve_fit(gaussian,\n # bins[:len(areaVals['two'])],\n # areaVals['two'],\n # p0=[0.8e8,1e7,10])\n #mu_spe = params_spe[0]\n #mu_2 = params_twohits[0] - mu_spe\n #print(mu_spe)\n #print(mu_2)\n \n #mu_avg = (mu_spe + mu_2)*0.5\n #mu_avg = get_gain(datapath,run)\n mu_avg = 1e7\n print(mu_avg)\n\n \n plt.figure()\n plt.grid(True)\n if isinstance(forcebins,np.ndarray):\n bins=forcebins\n fullspect,_,_=plt.hist([t/mu_avg for t in allTraces],\n forcebins,label='all traces')\n \n else:\n fullspect,bins,_=plt.hist([t/mu_avg for t in allTraces],\n int(np.floor(np.sqrt(len(allTraces)))),label='all traces')\n \n #print(bins)\n plt.yscale('log')\n plt.xlabel('phe based on a gain of '+str(mu_avg)+' electrons per phe')\n plt.legend()\n plt.show\n print(sum(fullspect)/total_time)\n print(\"The Total Exposure Time of run \"+str(runpath)+ \" was \"+str(total_time)+\" Seconds\")\n print(\"The overall PMT trigger rate was \" + str(len(allTraces)/total_time)+ \"Hz\")\n return [fullspect,bins,total_time]", "def filter(self, newer_than=None):\r\n if newer_than is None:\r\n newer_than = self._clock.time() - self._window.as_(Time.SECONDS)\r\n self._samples = [sample for sample in self._samples if sample[0] >= newer_than]", "def hand_samples(data,lfs,rfs,start_frame,cap,show=False):\n data['lhkpss'] = hand.translate_hand_kps(data['lhkpss'],data['kpss'],7);\n data['rhkpss'] = hand.translate_hand_kps(data['rhkpss'],data['kpss'],4);\n row1 = [];\n row2 = [];\n for f in lfs:\n cap.set(cv2.CAP_PROP_POS_FRAMES,start_frame+f);\n _,frame = cap.read();\n data['crop'] = ut.crop(frame,256,320);\n data['i'] = f;\n row1.append(draw_lh_lines(data)[105:-65,105:-65]);\n for f in rfs:\n cap.set(cv2.CAP_PROP_POS_FRAMES,start_frame+f);\n _,frame = cap.read();\n data['crop'] = ut.crop(frame,256,320);\n data['i'] = f;\n row2.append(draw_rh_lines(data)[65:-105,65:-105]);\n row1 = np.concatenate(row1,axis=1);\n row2 = np.concatenate(row2,axis=1);\n grid = np.concatenate([row1,row2]);\n if show:\n ut.show(grid);\n return grid;", "def downsampling(self, factor=10, first=3, switch=True):\r\n newprefix = self.name_prefix + 'down'\r\n for name in CMADataLogger.names:\r\n f = open(newprefix+name+'.dat','w')\r\n iline = 0\r\n cwritten = 
0\r\n for line in open(self.name_prefix+name+'.dat'):\r\n if iline < first or iline % factor == 0:\r\n f.write(line)\r\n cwritten += 1\r\n iline += 1\r\n f.close()\r\n print('%d' % (cwritten) + ' lines written in ' + newprefix+name+'.dat')\r\n if switch:\r\n self.name_prefix += 'down'\r\n return self", "def downsampling(self, factor=10, first=3, switch=True):\r\n newprefix = self.name_prefix + 'down'\r\n for name in CMADataLogger.names:\r\n f = open(newprefix+name+'.dat','w')\r\n iline = 0\r\n cwritten = 0\r\n for line in open(self.name_prefix+name+'.dat'):\r\n if iline < first or iline % factor == 0:\r\n f.write(line)\r\n cwritten += 1\r\n iline += 1\r\n f.close()\r\n print('%d' % (cwritten) + ' lines written in ' + newprefix+name+'.dat')\r\n if switch:\r\n self.name_prefix += 'down'\r\n return self", "def n_remaining_samples(self):\n return -1", "def n_remaining_samples(self):\n return -1", "def n_remaining_samples(self):\n return -1", "def sample(self, verbose=False):\n\t\tfor i in range(self.nsamp): self.subsample(verbose)\n\t\treturn self.amps, self.pos, self.irads", "def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))", "def samples_per_frame(self, value):\n self._samples_per_frame = value", "def downsample_AccSignals(signal, num): #signal (128,3)\n return resample(signal, num, axis=0)", "def _downsample(samples, number, extra_kwargs=None):\n from pesummary.utils.utils import resample_posterior_distribution\n import copy\n\n _samples = np.array(samples).T\n if number > len(_samples[0]):\n raise ValueError(\n \"Failed to downsample the posterior samples to {} because \"\n \"there are only {} samples stored in the file.\".format(\n number, len(_samples[0])\n )\n )\n _samples = np.array(resample_posterior_distribution(_samples, number))\n if extra_kwargs is None:\n return _samples.T.tolist()\n _extra_kwargs = copy.deepcopy(extra_kwargs)\n _extra_kwargs[\"sampler\"][\"nsamples\"] = number\n return _samples.T.tolist(), _extra_kwargs", "def next(self):\n\n if self.i_sample < self.n_sample:\n df_batch = self.grouped[self.i_sample:min(self.n_sample, self.i_sample + 
self.batch_size)]\n # at end of epoch, number of sample remains may be smaller than batch size\n if len(df_batch) < self.batch_size:\n df_sample = random.sample(self.grouped, self.batch_size-len(df_batch))\n df_batch = df_batch + df_sample\n try:\n assert len(df_batch) == self.batch_size\n except AssertionError:\n print(self.i_sample, df_sample, df_batch)\n\n # get random frame_idxs\n if self.train:\n flips = np.random.choice(a=[False, True], size=(self.batch_size,), p=[0.5, 0.5])\n else:\n flips = np.zeros(self.batch_size, dtype=bool)\n\n\n video = sample_clips(df_batch, flips, self.batch_size, self.n_frame,\n self.scale_w, self.scale_h, self.sample_half_time, self.train)\n\n bboxes = np.zeros((self.batch_size, self.n_frame // self.temporal_scale, self.n_bbox, 5))\n labels = np.zeros((self.batch_size, self.n_bbox, self.num_class))\n for i in range(len(df_batch)):\n tmp_bbox, tmp_label = self.get_bbox_and_label(df_batch[i], flips[i], i, self.scale_w, self.scale_h)\n bboxes[i] = tmp_bbox\n labels[i] = tmp_label\n\n if self.debug_dataloader:\n with open('dataset/AVA_v2.1/ava_action_list_v2.1.pbtxt') as fd:\n lines = fd.readlines()\n\n labels_info = []\n for i in range(80):\n name_line = lines[i * 5 + 1]\n label_id_line = lines[i * 5 + 2]\n label_type_line = lines[i * 5 + 3]\n\n name = name_line[name_line.find('\"') + 1:name_line.rfind('\"')]\n label_id = int(label_id_line.strip().split(':')[1].strip())\n label_type = label_type_line.strip().split(':')[1].strip()\n\n assert label_id == i + 1\n labels_info.append({\n 'name': name,\n 'label_type': label_type\n })\n\n for bidx in range(self.batch_size):\n s_video = video[bidx, ...]\n s_bboxes = bboxes[bidx, ...]\n s_labels = labels[bidx, ...]\n\n window_name = 'batch_idx_'+str(bidx)\n if self.train:\n window_name += '_train'\n else:\n window_name += '_val'\n\n\n bbox = s_bboxes[0, 0, 1:].astype(np.int32)\n label_indices = np.where(s_labels[0, :])[0]\n\n for fidx in range(self.n_frame):\n # print('fidx', fidx)\n save_name = window_name + '_' + str(fidx)\n tmp_img = (s_video[:, fidx, :, :].transpose((1,2,0))).astype(np.uint8).copy()\n\n cv2.rectangle(tmp_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,0,255), thickness=2)\n for en_idx, label_index in enumerate(label_indices):\n # print('label_index', label_index, 'len', len(labels_info))\n cv2.putText(tmp_img, labels_info[label_index]['name'], (bbox[0], bbox[1] + en_idx * 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color=(0, 255, 0), thickness=1)\n\n cv2.imwrite(save_name+'.jpg', tmp_img)\n\n\n #print(video.shape, bboxes.shape, labels.shape)\n ret = mx.io.DataBatch(data=[mx.nd.array(video), mx.nd.array(bboxes)],\n label=[mx.nd.array(labels),],\n provide_data=self.provide_data,\n provide_label=self.provide_label)\n\n self.i_sample += self.batch_size\n return ret\n else:\n raise StopIteration", "def loadtrainData_undersampling():\n train = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train.append([float(lineArr[i]) for i in range(len(lineArr))])\n\n pos = []\n neg = []\n for i in train:\n if i[-1] == 1.0:\n pos.append(i)\n else:\n neg.append(i)\n slice1 = random.sample(neg, len(pos))\n data = pos + slice1\n train_x = []\n train_y = []\n y = []\n for line in data:\n train_x.append([float(line[i]) for i in range(len(line) - 1)])\n y.append([int(line[-1])])\n for i in range(len(y)):\n train_y.append(y[i][0])\n return np.mat(train_x), np.mat(train_y).transpose()", "def zoom_down(frames, n):\n h, w, r = frames.shape # h:height 
w:width r:ret\n small_frames = cv2.resize(frames, (w / n, h / n), interpolation=cv2.INTER_CUBIC)\n return small_frames", "def make_downsample_filt_tensor(SR=16000, ENV_SR=200, WINDOW_SIZE=1001, beta=5.0, pycoch_downsamp=False):\n DOWNSAMPLE = SR/ENV_SR\n if not pycoch_downsamp: \n downsample_filter_times = np.arange(-WINDOW_SIZE/2,int(WINDOW_SIZE/2))\n downsample_filter_response_orig = np.sinc(downsample_filter_times/DOWNSAMPLE)/DOWNSAMPLE\n downsample_filter_window = signallib.kaiser(WINDOW_SIZE, beta)\n downsample_filter_response = downsample_filter_window * downsample_filter_response_orig\n else: \n max_rate = DOWNSAMPLE\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for our sinc-like function\n if max_rate!=1: \n downsample_filter_response = signallib.firwin(2 * half_len + 1, f_c, window=('kaiser', beta))\n else: # just in case we aren't downsampling -- I think this should work? \n downsample_filter_response = zeros(2 * half_len + 1)\n downsample_filter_response[half_len + 1] = 1\n \n # Zero-pad our filter to put the output samples at the center\n # n_pre_pad = int((DOWNSAMPLE - half_len % DOWNSAMPLE))\n # n_post_pad = 0\n # n_pre_remove = (half_len + n_pre_pad) // DOWNSAMPLE\n # We should rarely need to do this given our filter lengths...\n # while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],\n # up, down) < n_out + n_pre_remove:\n # n_post_pad += 1\n # downsample_filter_response = np.concatenate((np.zeros(n_pre_pad), downsample_filter_response, np.zeros(n_post_pad)))\n \n downsample_filt_tensor = tf.constant(downsample_filter_response, tf.float32)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 0)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 2)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 3)\n\n return downsample_filt_tensor", "def test_2d_freq_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/2d_pipe/test.ft2\")\n assert data.shape == (2048, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == 1601.83\n assert round(data[10,22],2) == 3079.44\n lowmem_write_readback(dic,data)\n check_ppm_limits(dic,data,0,[174.84, 65.21])\n check_ppm_limits(dic,data,1,[253.90, -143.80])", "def downsample(self, target, **kwargs):\n df_ds = downsample(\n self, sampling_freq=self.sampling_freq, target=target, **kwargs\n ).__finalize__(self)\n df_ds.sampling_freq = target\n\n if self.features is not None:\n ds_features = downsample(\n self.features, sampling_freq=self.sampling_freq, target=target, **kwargs\n )\n else:\n ds_features = self.features\n df_ds.features = ds_features\n return df_ds\n # return self.__class__(df_ds, sampling_freq=target, features=ds_features)", "def back_test(self, turnover_frequency):", "def downsample(self, tfactor=1, ffactor=1):\n if self.shape[0] % ffactor != 0:\n raise ValueError(\"Bad frequency factor given\")\n newnsamps = self.shape[1] - self.shape[1] % tfactor\n new_ar = np.empty(\n newnsamps * self.shape[0] // ffactor // tfactor, dtype=\"float32\",\n )\n ar = self.transpose().ravel().copy()\n lib.downsample(ar, new_ar, tfactor, ffactor, self.shape[0], newnsamps)\n new_ar = new_ar.reshape(\n newnsamps // tfactor, self.shape[0] // ffactor\n ).transpose()\n new_tsamp = self.header.tsamp * tfactor\n new_nchans = self.header.nchans // ffactor\n new_header = self.header.newHeader({\"tsamp\": new_tsamp, \"nchans\": new_nchans})\n return FilterbankBlock(new_ar, new_header)", "def extract_spike_features(time, current, 
voltage, start=0.1, end=0.7, fil=10):\n\n df = pd.DataFrame()\n df_related_features = pd.DataFrame()\n for c, curr in enumerate(current):\n current_array = curr * np.ones_like(time)\n start_index = (np.abs(time - start)).argmin() # Find closest index where the injection current starts\n end_index = (np.abs(time - end)).argmin() # Find closest index where the injection current ends\n current_array[:start_index] = 0\n current_array[end_index:len(current_array)] = 0\n EphysObject = efex.EphysSweepFeatureExtractor(t=time, v=voltage[:, c], i=current_array, start=start, \\\n end=end, filter=fil)\n EphysObject.process_spikes()\n\n # Adding peak_height (mV) + code for maximum frequency determination (see further)\n spike_count = 0\n if EphysObject._spikes_df.size:\n EphysObject._spikes_df['peak_height'] = EphysObject._spikes_df['peak_v'].values - \\\n EphysObject._spikes_df['threshold_v'].values\n spike_count = EphysObject._spikes_df['threshold_i'].values.size\n df = pd.concat([df, EphysObject._spikes_df], sort=True)\n\n # Some easily found extra features\n df_features = EphysObject._sweep_features\n\n # Adding spike count\n df_features.update({'spike_count': spike_count})\n\n # Adding spike frequency adaptation (ratio of spike frequency of second half to first half)\n SFA = np.nan\n half_stim_index = ft.find_time_index(time, np.float(start + (end - start) / 2))\n if spike_count > 5: # We only consider traces with more than 8.333 Hz = 5/600 ms spikes here\n # but in the end we only take the trace with the max amount of spikes\n\n if np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index) != 0:\n SFA = np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] > half_stim_index) / \\\n np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index)\n\n df_features.update({'SFA': SFA})\n\n # Adding current (pA)\n df_features.update({'current': curr})\n\n # Adding membrane voltage (mV)\n df_features.update({'resting_membrane_potential': EphysObject._get_baseline_voltage()})\n\n # Adding voltage deflection to steady state (mV)\n voltage_deflection_SS = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n # voltage_deflection_v, voltage_deflection_i = EphysObject.voltage_deflection() # = old way: max deflection\n df_features.update({'voltage_deflection': voltage_deflection_SS})\n\n # Adding input resistance (MOhm)\n input_resistance = np.nan\n if not ('peak_i' in EphysObject._spikes_df.keys()) and not curr == 0: # We only calculate input resistances\n # from traces without APs\n input_resistance = (np.abs(voltage_deflection_SS - EphysObject._get_baseline_voltage()) * 1000) / np.abs(\n curr)\n if input_resistance == np.inf:\n input_resistance = np.nan\n df_features.update({'input_resistance': input_resistance})\n\n # Adding membrane time constant (s) and voltage plateau level for hyperpolarisation paradigms\n # after stimulus onset\n tau = np.nan\n E_plat = np.nan\n sag_ratio = np.nan\n if curr < 0: # We use hyperpolarising steps as required in the object function to estimate the\n # membrane time constant and E_plateau\n while True:\n try:\n tau = EphysObject.estimate_time_constant() # Result in seconds!\n break\n except TypeError: # Probably a noisy bump for this trace, just keep it to be np.nan\n break\n E_plat = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n sag, sag_ratio = EphysObject.estimate_sag()\n df_features.update({'tau': tau})\n df_features.update({'E_plat': E_plat})\n df_features.update({'sag_ratio': 
sag_ratio})\n\n # For the rebound and sag time we only are interested in the lowest (-200 pA (usually)) hyperpolarisation trace\n rebound = np.nan\n sag_time = np.nan\n sag_area = np.nan\n\n if c == 0:\n baseline_interval = 0.1 # To calculate the SS voltage\n v_baseline = EphysObject._get_baseline_voltage()\n\n end_index = ft.find_time_index(time, 0.7)\n if np.flatnonzero(voltage[end_index:, c] > v_baseline).size == 0: # So perfectly zero here means\n # it did not reach it\n rebound = 0\n else:\n index_rebound = end_index + np.flatnonzero(voltage[end_index:, c] > v_baseline)[0]\n if not (time[index_rebound] > (end + 0.15)): # We definitely have 150 ms left to calculate the rebound\n rebound = ft.average_voltage(\n voltage[index_rebound:index_rebound + ft.find_time_index(time, 0.15), c], \\\n time[index_rebound:index_rebound + ft.find_time_index(time, 0.15)]) - v_baseline\n else: # Work with whatever time is left\n if time[-1] == time[index_rebound]:\n rebound = 0\n else:\n rebound = ft.average_voltage(voltage[index_rebound:, c], \\\n time[index_rebound:]) - v_baseline\n\n v_peak, peak_index = EphysObject.voltage_deflection(\"min\")\n v_steady = ft.average_voltage(voltage[:, c], time, start=end - baseline_interval, end=end)\n\n if v_steady - v_peak < 4: # The sag should have a minimum depth of 4 mV\n # otherwise we set sag time and sag area to 0\n sag_time = 0\n sag_area = 0\n else:\n # First time SS is reached after stimulus onset\n first_index = start_index + np.flatnonzero(voltage[start_index:peak_index, c] < v_steady)[0]\n # First time SS is reached after the max voltage deflection downwards in the sag\n if np.flatnonzero(voltage[peak_index:end_index, c] > v_steady).size == 0:\n second_index = end_index\n else:\n second_index = peak_index + np.flatnonzero(voltage[peak_index:end_index, c] > v_steady)[0]\n sag_time = time[second_index] - time[first_index]\n sag_area = -integrate.cumtrapz(voltage[first_index:second_index, c], time[first_index:second_index])[-1]\n\n burst_metric = np.nan\n # print(c)\n if spike_count > 5:\n burst = EphysObject._process_bursts()\n if len(burst) != 0:\n burst_metric = burst[0][0]\n\n df_features.update({'rebound': rebound})\n df_features.update({'sag_time': sag_time})\n df_features.update({'sag_area': sag_area})\n df_features.update({'burstiness': burst_metric})\n\n df_related_features = pd.concat([df_related_features, pd.DataFrame([df_features])], sort=True)\n\n return df, df_related_features", "def test_2d_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/2d_pipe/test.fid\")\n assert data.shape == (332, 1500)\n assert data.dtype == 'complex64'\n assert round(data[0,1].real,2) == 360.07\n assert round(data[0,1].imag,2) == 223.20\n assert round(data[10,22].real,2) == -26.76\n assert round(data[10,22].imag,2) == 42.67\n lowmem_write_readback(dic,data)", "def prior_sample(self):\n pass", "def initial_sampling(y):\n samples = list(np.random.randint(0, len(y), 2))\n while len(np.unique(y[samples] > 0.5)) != 2:\n samples = list(np.random.randint(0, len(y), 2))\n return samples", "def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df", "def reconstruct_sample_top_layer(self,\n input_data,\n sampling_steps=100,\n sample_forward_backward=False):\n self.forward_propagate(input_data, 
sample_forward_backward)\n self.sample_top_layer(sampling_steps, None, True)\n self.backward_propagate(self.states[len(self.states) - 1], sample_forward_backward)\n return self.states[0]", "def downsample(y, u, n, nsper=None, keep=False):\n # axis to operate along\n axis = 0\n\n # filter and downsample\n # prime factor decomposition.\n for k in prime_factor(n):\n y = decimate(y, q=k, ftype='fir', axis=axis)\n\n # index for downsampling u\n sl = [slice(None)] * u.ndim\n sl[axis] = slice(None, None, n)\n u = u[sl]\n\n # Removal of the last simulated period to eliminate the edge effects\n # due to the low-pass filter.\n if not keep:\n y = y[..., :-1]\n u = u[..., :-1]\n\n return u, y", "def skybass_sampling_rates(data):\n for i in range(4):\n fig = plt.figure()\n TODO: finish", "def chunks(X, y, batch_size=32, augmentation_times=4, thickness=0,\n data_generator=ImageDataGenerator(dim_ordering=\"th\"), is_training=True):\n while 1:\n prct_pop, prct1 = 0.2, 0.2 # (1) of all the training set, how much we keep (2) % of 1's\n idx_1 = [i for i in range(len(y)) if y[i] == 1]\n idx_1 = random.sample(idx_1, int(prct_pop * len(idx_1)))\n idx_0 = [i for i in range(len(y)) if y[i] == 0]\n idx_0 = random.sample(idx_0, int(len(idx_1) / prct1))\n selected_samples = idx_0 + idx_1\n random.shuffle(selected_samples)\n logging.info(\"Final downsampled dataset stats: TP:%d, FP:%d\" % (\n sum(y[selected_samples]), len(y[selected_samples]) - sum(y[selected_samples])))\n\n i, good = 0, 0\n lenX = len(selected_samples)\n for X_batch, y_batch in data_generator.flow(X[selected_samples], y[selected_samples], batch_size=batch_size,\n shuffle=is_training):\n i += 1\n if good * batch_size > lenX * augmentation_times or i > 100: # stop when we have augmented enough the batch\n break\n if X_batch.shape[0] != batch_size: # ensure correct batch size\n continue\n good += 1\n yield X_batch, y_batch", "def subsample(y, lims):\n buckets = len(lims) - 1\n y_subs = np.zeros(buckets)\n for i in range(buckets):\n y_subs[i] = np.min(y[lims[i]:lims[i+1]])\n\n return y_subs", "def load_data(filename, sequence_length, start = None):\n \n if start == None:\n start = 0\n \n #Read the data file\n raw_data = pd.read_csv(filename, nrows = 20000 ,dtype = float).values\n \n #Change all zeros to the number before the zero occurs\n for x in range(0, raw_data.shape[0]):\n for y in range(0, raw_data.shape[1]):\n if(raw_data[x][y] == 0):\n raw_data[x][y] = raw_data[x-1][y]\n \n #Convert the file to a list\n data = raw_data.tolist()\n #Convert the data to a 3D array (a x b x c) \n #Where a is the number of days, b is the window size, and c is the number of features in the data file\n\n result = []\n for index in range(len(data) - sequence_length):\n result.append(data[index: index + sequence_length])\n \n# print (result[-1], len(result))\n #Normalizing data by going through each window\n #Every value in the window is divided by the first value in the window, and then 1 is subtracted\n\n d0 = np.array(result)\n# print(d0[0])\n dr = np.zeros_like(d0)\n dr[:,1:,:] = d0[:,1:,:] / d0[:,0:1,:] - 1\n \n #Keeping the unnormalized prices for Y_test\n #Useful when graphing bitcoin price over time later\n end = int(dr.shape[0])\n unnormalized_bases = d0[start:end + 1,0:1,4]\n \n print(\"Total dr shape\", dr.shape)\n \n #Splitting data set into training (First 90% of data points) and testing data (last 10% of data points)\n split_line = round(0.9 * dr.shape[0])\n training_data = dr[:int(split_line), :]\n \n #Shuffle the data\n np.random.shuffle(training_data)\n 
\n #Training Data\n #the 4 is the column of the data that we want to train for\n X_train = training_data[:, :-1]\n Y_train = training_data[:, -1]\n Y_train = Y_train[:, 4]\n \n #Testing data\n X_test = dr[int(split_line):, :-1]\n Y_test = dr[int(split_line):, 49, :]\n Y_test = Y_test[:, 4]\n\n #Get the day before Y_test's price\n Y_daybefore = dr[int(split_line):, 48, :]\n Y_daybefore = Y_daybefore[:, 4]\n \n #Get window size and sequence length\n sequence_length = sequence_length\n window_size = sequence_length - 1 #because the last value is reserved as the y value\n \n return X_train, Y_train, X_test, Y_test, Y_daybefore, unnormalized_bases, window_size", "def oversampling(self):\n return self._oversampling", "def rend_samples(data,start_frame,cap,fs=[30,51,78,90],show=False):\n row1 = [];\n row2 = [];\n row3 = [];\n for f in fs:\n cap.set(cv2.CAP_PROP_POS_FRAMES,start_frame+f);\n _,data['frame'] = cap.read();\n data['i'] = f;\n row1.append(extract_head(data));\n row2.append(data['plts']['y'][f]);\n render = cv2.imread(cf.renders_path + \"{:04d}.png\".format(f+1));\n render = cv2.resize(render,(256,256));\n row3.append(render);\n row1 = np.concatenate(row1,axis=1);\n row2 = np.concatenate(row2,axis=1);\n row3 = np.concatenate(row3,axis=1);\n grid = np.concatenate([row1,row2,row3]);\n if show:\n ut.show(grid);\n return grid;", "def _sample_single(self):\n ss = super(SuperResolutions, self)._sample_single()\n image = ss['data']\n # Down sample\n max_down_sample = max(self.data_down_sample, self.label_down_sample)\n if self.is_down_sample:\n images = []\n images.append(image)\n for i in range(max_down_sample):\n image = self.downsample(image)\n images.append(image)\n data = images[self.data_down_sample]\n label = images[self.label_down_sample]\n return {'data': data, 'label': label}", "def downsample(frames, ds_shape, dtype='float32', rescale=1., mean_tds=True):\n if dtype == 'uint8':\n assert rescale == 10.\n \n if np.prod(ds_shape) == 1:\n frames *= rescale\n if dtype == 'uint8':\n frames = frames.round().astype('uint8')\n return frames\n \n if ds_shape[1] * ds_shape[2] == 1:\n frames_ds = frames\n else:\n frames_ds = np.zeros((frames.shape[0],\n frames.shape[1]/ds_shape[1],\n frames.shape[2]/ds_shape[2]), \n dtype='float32')\n for i in range(frames.shape[0]):\n frames_ds[i] = cv2.resize(frames[i].astype('float32'), (0, 0), fx=1./ds_shape[1], fy=1./ds_shape[2],\n interpolation = cv2.INTER_AREA)\n if ds_shape[0] > 1:\n if mean_tds: \n frames_ds = frames_ds.reshape((frames_ds.shape[0]/ds_shape[0], ds_shape[0], \n frames_ds.shape[1], frames_ds.shape[2])).mean(axis=1)\n else:\n frames_ds = frames_ds[(frames_ds.shape[0]-1) % ds_shape[0] : : ds_shape[0]]\n frames_ds *= rescale\n assert dtype in ['float32', 'uint8']\n if dtype == 'uint8':\n frames_ds = frames_ds.round().astype('uint8')\n return frames_ds", "def run_dropout(x_t, key, keep_rate):\n ntime = x_t.shape[0]\n keys = random.split(key, ntime)\n return batch_dropout(x_t, keys, keep_rate)", "def _downsample(f):\n downx, downy = hl.funcs(\"downx downy\")\n downx[x, y, hl._] = (\n f[2 * x - 1, y, hl._]\n + 3.0 * (f[2 * x, y, hl._] + f[2 * x + 1, y, hl._])\n + f[2 * x + 2, y, hl._]\n ) / 8.0\n downy[x, y, hl._] = (\n downx[x, 2 * y - 1, hl._]\n + 3.0 * (downx[x, 2 * y, hl._] + downx[x, 2 * y + 1, hl._])\n + downx[x, 2 * y + 2, hl._]\n ) / 8.0\n return downy", "def _quantize(self):\n self._quantized_subsamples = [0] * self._number_of_subsamples\n for index, value in enumerate(self._subsamples):\n self._quantized_subsamples[index] = 
self._quantize_one_value(value)\n logging.debug('quantized down-samples: %r', self._quantized_subsamples)", "def hist_downsample(input, factor):\n factor = int(factor) # Converts downsampling factor to type 'int', enabling easier indexing\n downsampled_len = round(len(input) / factor) # Calculates integer length of downsampled data list\n downsampled = np.empty(downsampled_len) # Create empty list\n for i in range(0, downsampled_len):\n # Finding the start and end indices of the original data which corresponds to the downsampled index\n start_index = i * factor\n end_index = start_index + factor\n # Summing over these indices to generate the downsampled data point\n sum = 0\n for j in input[start_index:end_index]:\n sum += j\n\n downsampled[i] = sum # Assigns this new value to the downsampled list\n \n return np.array(downsampled) # Returns the downsampled list as a NumPy array", "def flattenFrames(stack):\n \n maxHeight=0\n frameList=[]\n \n \n print('\\n')\n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting shifts {:.2f}% done'.format(100.0*((i+1)/len(stack))),end='', flush=True)\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def resample(self, data):\n # Resample signal\n if self._fs_out != self._fs_in:\n\n # Check that new sample rate is compatable with batch size\n if self._next_frame_idx != 0:\n raise ValueError('New sample rate incompatable with batch size.')\n\n # Input dimensions\n frames_in = data.shape[0] # samples\n len_data = float(frames_in) / self._fs_in # seconds\n\n # Output dimensions\n frames_out = int(np.round(len_data*self._fs_out)) # samples\n delta_out = 1.0 / self._fs_out # seconds\n\n # Predict next frame\n self._next_frame_idx = int(np.round(frames_out*delta_out*self._fs_in)) - \\\n frames_in\n\n # Compute indices to output\n idx_out = np.round(np.arange(frames_out)*delta_out*self._fs_in)\n\n # Sample data using output indices\n data_out = np.zeros((frames_out, data.shape[1]))\n for i in range(frames_out):\n idx = min(frames_in - 1, idx_out[i])\n data_out[i, :] = data[int(idx), :]\n\n else:\n\n # No resampling\n data_out = data\n\n return data_out", "def search_data(self, data_obj):\n logger.info(\"Start searching for coarse channel: %s\"%data_obj.header['coarse_chan'])\n self.logwriter.info(\"Start searching for %s ; coarse channel: %i \"%(data_obj.filename,data_obj.header['coarse_chan']))\n spectra, drift_indices = data_obj.load_data()\n tsteps = data_obj.tsteps\n tsteps_valid = data_obj.tsteps_valid\n tdwidth = data_obj.tdwidth\n fftlen = data_obj.fftlen\n nframes = tsteps_valid\n shoulder_size = data_obj.shoulder_size\n\n if self.flagging:\n ##EE This flags the edges of the PFF for BL data (with 3Hz res per channel).\n ##EE The PFF flat profile falls after around 100k channels.\n ##EE But it falls slowly enough that could use 50-80k channels.\n median_flag = np.median(spectra)\n# spectra[:,:80000] = median_flag/float(tsteps)\n# spectra[:,-80000:] = median_flag/float(tsteps)\n\n ##EE Flagging spikes in time series.\n time_series=spectra.sum(axis=1)\n time_series_median = np.median(time_series)\n 
mask=(time_series-time_series_median)/time_series.std() > 10 #Flagging spikes > 10 in SNR\n\n if mask.any():\n self.logwriter.info(\"Found spikes in the time series. Removing ...\")\n spectra[mask,:] = time_series_median/float(fftlen) # So that the value is not the median in the time_series.\n\n else:\n median_flag = np.array([0])\n\n # allocate array for findopplering\n # init findopplering array to zero\n tree_findoppler = np.zeros(tsteps * tdwidth,dtype=np.float64) + median_flag\n\n # allocate array for holding original\n # Allocates array in a fast way (without initialize)\n tree_findoppler_original = np.empty_like(tree_findoppler)\n\n # allocate array for negative doppler rates\n tree_findoppler_flip = np.empty_like(tree_findoppler)\n\n # build index mask for in-place tree doppler correction\n ibrev = np.zeros(tsteps, dtype=np.int32)\n\n for i in range(0, tsteps):\n ibrev[i] = bitrev(i, int(np.log2(tsteps)))\n\n##EE: should double check if tdwidth is really better than fftlen here.\n max_val = max_vals()\n if max_val.maxsnr == None:\n max_val.maxsnr = np.zeros(tdwidth, dtype=np.float64)\n if max_val.maxdrift == None:\n max_val.maxdrift = np.zeros(tdwidth, dtype=np.float64)\n if max_val.maxsmooth == None:\n max_val.maxsmooth = np.zeros(tdwidth, dtype='uint8')\n if max_val.maxid == None:\n max_val.maxid = np.zeros(tdwidth, dtype='uint32')\n if max_val.total_n_hits == None:\n max_val.total_n_hits = 0\n\n #EE: Making \"shoulders\" to avoid \"edge effects\". Could do further testing.\n specstart = int(tsteps*shoulder_size/2)\n specend = tdwidth - (tsteps * shoulder_size)\n\n #--------------------------------\n #Stats calc\n self.the_mean_val, self.the_stddev = comp_stats(spectra.sum(axis=0))\n\n #--------------------------------\n #Looping over drift_rate_nblock\n #--------------------------------\n drift_rate_nblock = int(np.floor(self.max_drift / (data_obj.drift_rate_resolution*tsteps_valid)))\n\n##EE-debuging kk = 0\n\n for drift_block in range(-1*drift_rate_nblock,drift_rate_nblock+1):\n logger.debug( \"Drift_block %i\"%drift_block)\n\n #----------------------------------------------------------------------\n # Negative drift rates search.\n #----------------------------------------------------------------------\n if drift_block <= 0:\n\n #Populates the find_doppler tree with the spectra\n populate_tree(spectra,tree_findoppler,nframes,tdwidth,tsteps,fftlen,shoulder_size,roll=drift_block,reverse=1)\n\n # populate original array\n np.copyto(tree_findoppler_original, tree_findoppler)\n\n # populate neg doppler array\n np.copyto(tree_findoppler_flip, tree_findoppler_original)\n \n # Flip matrix across X dimension to search negative doppler drift rates\n FlipX(tree_findoppler_flip, tdwidth, tsteps)\n logger.info(\"Doppler correcting reverse...\")\n tt.taylor_flt(tree_findoppler_flip, tsteps * tdwidth, tsteps)\n logger.debug( \"done...\")\n \n complete_drift_range = data_obj.drift_rate_resolution*np.array(range(-1*tsteps_valid*(np.abs(drift_block)+1)+1,-1*tsteps_valid*(np.abs(drift_block))+1))\n for k,drift_rate in enumerate(complete_drift_range[(complete_drift_range<self.min_drift) & (complete_drift_range>=-1*self.max_drift)]):\n # indx = ibrev[drift_indices[::-1][k]] * tdwidth\n\n # DCP 2020.04 -- WAR to drift rate in flipped files\n if data_obj.header['DELTAF'] < 0:\n drift_rate *= -1\n\n indx = ibrev[drift_indices[::-1][(complete_drift_range<self.min_drift) & (complete_drift_range>=-1*self.max_drift)][k]] * tdwidth\n\n # SEARCH NEGATIVE DRIFT RATES\n spectrum = tree_findoppler_flip[indx: 
indx + tdwidth]\n\n # normalize\n spectrum -= self.the_mean_val\n spectrum /= self.the_stddev\n\n #Reverse spectrum back\n spectrum = spectrum[::-1]\n\n n_hits, max_val = hitsearch(spectrum, specstart, specend, self.snr, drift_rate, data_obj.header, fftlen, tdwidth, max_val, 0)\n info_str = \"Found %d hits at drift rate %15.15f\\n\"%(n_hits, drift_rate)\n max_val.total_n_hits += n_hits\n logger.debug(info_str)\n self.logwriter.info(info_str)\n\n #----------------------------------------------------------------------\n # Positive drift rates search.\n #----------------------------------------------------------------------\n if drift_block >= 0:\n\n #Populates the find_doppler tree with the spectra\n populate_tree(spectra,tree_findoppler,nframes,tdwidth,tsteps,fftlen,shoulder_size,\n roll=drift_block,reverse=1)\n\n # populate original array\n np.copyto(tree_findoppler_original, tree_findoppler)\n\n logger.info(\"Doppler correcting forward...\")\n tt.taylor_flt(tree_findoppler, tsteps * tdwidth, tsteps)\n logger.debug( \"done...\")\n if (tree_findoppler == tree_findoppler_original).all():\n logger.error(\"taylor_flt has no effect?\")\n else:\n logger.debug(\"tree_findoppler changed\")\n\n ##EE: Calculates the range of drift rates for a full drift block.\n complete_drift_range = data_obj.drift_rate_resolution*np.array(range(tsteps_valid*(drift_block),tsteps_valid*(drift_block +1)))\n\n for k,drift_rate in enumerate(complete_drift_range[(complete_drift_range>=self.min_drift) & (complete_drift_range<=self.max_drift)]):\n\n indx = ibrev[drift_indices[k]] * tdwidth\n\n #DCP 2020.04 -- WAR to drift rate in flipped files\n if data_obj.header['DELTAF'] < 0:\n drift_rate *= -1\n\n # SEARCH POSITIVE DRIFT RATES\n spectrum = tree_findoppler[indx: indx+tdwidth]\n\n # normalize\n spectrum -= self.the_mean_val\n spectrum /= self.the_stddev\n\n n_hits, max_val = hitsearch(spectrum, specstart, specend, self.snr, drift_rate, data_obj.header, fftlen, tdwidth, max_val, 0)\n info_str = \"Found %d hits at drift rate %15.15f\\n\"%(n_hits, drift_rate)\n max_val.total_n_hits += n_hits\n logger.debug(info_str)\n self.logwriter.info(info_str)\n\n # Writing the top hits to file.\n self.filewriter = tophitsearch(tree_findoppler_original, max_val, tsteps, nframes, data_obj.header, tdwidth,\n fftlen, self.max_drift,data_obj.obs_length, out_dir = self.out_dir,\n logwriter=self.logwriter, filewriter=self.filewriter, obs_info=self.obs_info)\n\n logger.info(\"Total number of candidates for coarse channel \"+ str(data_obj.header['coarse_chan']) +\" is: %i\"%max_val.total_n_hits)", "def Sleep(self):\n sleep(self.pSampling)", "def load_wav(wav_path, downsample, n_steps):\n data = scipy.io.wavfile.read(wav_path)[1]\n data = scipy.signal.decimate(data, downsample) \n out = np.zeros((1, n_steps))\n out[0, n_steps - np.shape(data)[0]:] = data\n return out", "def upsample(X, y, seed):\n ros = RandomOverSampler(random_state=seed)\n X_resampled, y_resampled = ros.fit_resample(X, y)\n return X_resampled, y_resampled", "def nearest_test_pulse(self):", "def hotspot_data(self, num_timestamps=5, remove_outliers=True):\n while True:\n video_id = random.choice(self.video_ids)\n hotspot_func = self.hotspot_function(video_id)\n thumbnails = [th for th in self.video_thumbnails(video_id)]\n\n # The beginning of the video usually has too many views.\n while remove_outliers and thumbnails and thumbnails[0][1] < 20:\n del thumbnails[0]\n\n if hotspot_func is None or len(thumbnails) < num_timestamps:\n continue\n\n while len(thumbnails) > 
num_timestamps:\n del thumbnails[random.randrange(len(thumbnails))]\n _, timestamps = zip(*thumbnails)\n yield list(thumbnails), [float(hotspot_func(t)) for t in timestamps]", "def experience_replay(self):\n s,a,r,sp,done = self.memory.sample(self.batch_size)\n # TODO: 5 lines missing.\n raise NotImplementedError(\"\")\n self.Q.fit(s, target=target)", "def samples(self):\n return self._values[:self.nsamples]", "def window_data(datax, datay, window_length, hop_size, sample_rate, test_size):\n sample_window_length = int(np.floor(window_length * sample_rate))\n sample_hop_size = int(np.floor(hop_size * sample_rate))\n\n X_train = np.empty((0, sample_window_length))\n X_test = np.empty((0, sample_window_length))\n y_train = np.array([])\n y_test = np.array([])\n\n for (index, row) in datax.items():\n sys.stdout.write(f\"\\r[-] Reading: {index} of {len(datax)} ({index / len(datax) * 100: .2f}%)\")\n sys.stdout.flush()\n\n windowed_row = np.empty((0, sample_window_length))\n target_row = np.array([])\n\n for start_pos in np.arange(0, len(row)-sample_window_length, sample_hop_size):\n window = datax.loc[index][start_pos:start_pos + sample_window_length]\n\n windowed_row = np.vstack((windowed_row, window))\n target_row = np.append(target_row, datay[index])\n\n midpoint = int(np.floor(len(windowed_row) * (1-test_size)))\n\n X_train = np.vstack((X_train, windowed_row[:midpoint]))\n X_test = np.vstack((X_test, windowed_row[midpoint:]))\n y_train = np.append(y_train, target_row[:midpoint])\n y_test = np.append(y_test, target_row[midpoint:])\n\n return X_train, X_test, y_train, y_test", "def subtrack_train(cur):\n assert cur[0].state_size >= cur[0].subtrack_maxlen, \"Track length is longer than input size\"\n op = cur[0]\n op.image_folder = 'image0'\n print(\"Round 0\")\n run_track(op)\n\n op = cur[1]\n for env in range(op.n_env - 1):\n op.image_folder = f'image{env+1}'\n print(f\"\\nRound {env+1}\")\n run_track(op)\n op.subtrack = False\n op.epoch = 50\n op.image_folder = 'imagefull'\n run_track(op)", "def loadtrainData_oversampling():\n pre_x = []\n pre_y = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n pre_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])\n pre_y.append(int(lineArr[-1]))\n ros = RandomOverSampler(random_state=0)\n sampl_x, sampl_y = ros.fit_sample(pre_x, pre_y)\n return np.mat(sampl_x), np.mat(sampl_y).transpose()" ]
[ "0.6455042", "0.61035407", "0.59346664", "0.58270806", "0.58128667", "0.57843024", "0.5715292", "0.57149154", "0.57001853", "0.5676207", "0.5668947", "0.559317", "0.5588073", "0.5547957", "0.5523909", "0.5510277", "0.5501954", "0.54938704", "0.54915655", "0.5463476", "0.543445", "0.5414946", "0.5383452", "0.537052", "0.53490114", "0.5330503", "0.5323642", "0.5315958", "0.53073895", "0.530439", "0.52918893", "0.52873087", "0.5272548", "0.52634346", "0.52553976", "0.5254986", "0.5247584", "0.5241679", "0.52308506", "0.52138025", "0.5188609", "0.5185888", "0.5185032", "0.5173113", "0.5171576", "0.5164287", "0.5164287", "0.51531726", "0.51521546", "0.5150266", "0.51411784", "0.51411784", "0.5134142", "0.5134142", "0.5134142", "0.5132143", "0.51315045", "0.5123969", "0.51187813", "0.51184195", "0.5111734", "0.5108226", "0.50977725", "0.5094172", "0.5090692", "0.5085428", "0.5072444", "0.5072382", "0.50679344", "0.5066858", "0.506584", "0.5060698", "0.5060587", "0.50374967", "0.5020572", "0.500653", "0.49978527", "0.49945295", "0.49930355", "0.4991002", "0.4990687", "0.49812856", "0.49784616", "0.49782944", "0.4975128", "0.49649552", "0.49637824", "0.49604818", "0.49585396", "0.49475816", "0.49447843", "0.49370483", "0.49369764", "0.49349615", "0.4932522", "0.49285072", "0.49264577", "0.49245793", "0.49213752", "0.49213207" ]
0.66121924
0
Downsamples fluorescence data to include approximately the top 1% of frames based on total increase in activity. Currently the threshold is set for 1000 neurons. Original code from
def downsample_fluorescence(F, thres=20, verbose=1):
    # Total increase in activity per frame, summed over all neurons
    diff_F = np.diff(F, axis=1)
    sum_F = np.sum(diff_F, axis=0)
    F = F[:, :-1]
    if verbose > 0:
        print(
            'Downsampling fluorescence data to {} frames using threshold {}'
            .format(np.sum(np.greater(sum_F, thres)), thres))
    # Keep only frames whose summed activity increase exceeds the threshold
    return F[:, np.greater(sum_F, thres)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)", "def determine_silence_threshold(self):\n loudest_sound_cohort_size = 0.2 # Top 20% are counted in the loudest sound group.\n silence_threshold_multiplier = 1.6 # Sounds must be at least 1.6x as loud as the loudest silence\n\n rospy.loginfo(\"Getting intensity values from mic.\")\n self.open_stream()\n tss = self.total_silence_samples\n values = [math.sqrt(abs(audioop.avg(self.stream.read(self.chunk_size), self.audio_format_width)))\n for _ in range(tss)]\n values = sorted(values, reverse=True)\n sum_of_loudest_sounds = sum(values[:int(tss * loudest_sound_cohort_size)])\n total_samples_in_cohort = int(tss * loudest_sound_cohort_size)\n average_of_loudest_sounds = sum_of_loudest_sounds / total_samples_in_cohort\n rospy.loginfo(\"Average audio intensity is %d\" % average_of_loudest_sounds)\n self.silence_threshold = average_of_loudest_sounds * silence_threshold_multiplier\n rospy.loginfo(\"Silence threshold set to %d \" % self.silence_threshold)\n self.close_stream()", "def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsamples) == self._number_of_subsamples:\n raise WaveformError(\n 'Number of subsample is %r, while %r is expected' % (\n len(self._subsamples), self._number_of_subsamples))\n logging.debug('down-samples: %r', self._subsamples)", "def main(threshold=100, normed_length=200):\n base_loc = DATA_DIR + '/raw/human_activity/RawData'\n labels_file_data = read_file_data(base_loc + '/labels.txt', int)\n\n X = []\n y = []\n\n last_experiment_number = None\n last_user_number = None\n for experiment_number, user_number, activity_number, start, end in labels_file_data:\n # There are 12 classes:\n # 1 Walking\n # 2 Walking upstairs\n # 3 Walking downstairs\n # 4 Sitting\n # 5 Standing\n # 6 Lieing down\n # 7 Standing to siting\n # 8 Sitting to standing\n # 9 Siting to lieing down\n # 10 Lieing down to sitting\n # 11 Standing to lieing down\n # 12 Lieing down to standing\n # But some have very few samples, and without them it's basically a balanced classification problem.\n if activity_number > 6:\n continue\n\n 
end += 1\n if experiment_number != last_experiment_number or user_number != last_user_number:\n acc_filename = 'acc_exp{:02}_user{:02}.txt'.format(experiment_number, user_number)\n gyro_filename = 'gyro_exp{:02}_user{:02}.txt'.format(experiment_number, user_number)\n acc_file_data = torch.tensor(read_file_data(base_loc + '/' + acc_filename, float))\n gyro_file_data = torch.tensor(read_file_data(base_loc + '/' + gyro_filename, float))\n # Is a tensor of shape (length, channels=6)\n both_data = torch.cat([acc_file_data, gyro_file_data], dim=1)\n last_experiment_number = experiment_number\n last_user_number = user_number\n\n # minimum length is 74\n # maximum length is 2032\n # I think what they did in the original dataset was split it up into pieces roughly 74 steps long. It's not\n # obvious that it's going to be that easy to learn from short series so here we split it up into pieces\n # 'normed_length' steps long, and apply fill-forward padding to the end if it's still at least of length\n # 'threshold'' and discard it if it's shorter. This doesn't affect much of our dataset.\n for start_ in range(start, end, normed_length):\n start_plus = start_ + normed_length\n if start_plus > end:\n too_short = True\n if start_plus - end < threshold:\n continue # skip data\n end_ = min(start_plus, end)\n else:\n too_short = False\n end_ = start_plus\n Xi = both_data[start_:end_]\n if too_short:\n Xi = torch.cat([Xi, Xi[-1].repeat(start_plus - end, 1)], dim=0)\n X.append(Xi)\n y.append(activity_number - 1)\n X = torch.stack(X, dim=0)\n y = torch.tensor(y)\n return X, y", "def downsample_data(dataset):\n loss = dataset.loc[dataset[TARGET] == 'loss']\n good_gain = dataset.loc[dataset[TARGET] == 'good_gain']\n \n sample_size = min([loss.shape[0], good_gain.shape[0]])\n loss = loss.sample(n=sample_size, random_state=42)\n good_gain = good_gain.sample(n=sample_size, random_state=42)\n \n frames = [loss, good_gain]\n return shuffle(pd.concat(frames), random_state=0)", "def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = 
savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor", "def get_silence_threshold(sound, lower_quantile):\n soundint = sound.to_intensity()\n max_intensity = call(soundint, 'Get quantile', 0.0, 0.0, 1)\n sil_intensity = call(soundint, 'Get quantile', 0.0, 0.0, lower_quantile)\n return sil_intensity - max_intensity", "def remove_silence(y, threshold=-50, nb_sample=4096): \r\n from scipy.ndimage.filters import maximum_filter1d \r\n \r\n if np.max(y) != 1.0:\r\n raise ValueError(\"Input signal is expected to be normalised to 1\")\r\n \r\n # Ignore log(0) warnings\r\n np.seterr(divide = 'ignore') \r\n y_db = 20 * np.log10(np.abs(y))\r\n np.seterr(divide = 'warn') \r\n \r\n y_envelope = maximum_filter1d(y_db, nb_sample) \r\n mask = y_envelope >= threshold\r\n y_out = y[mask]\r\n \r\n return(y_out)", "def gen_sensor_reward(self,MAX_UNCERTAINTY,window_size,window_lag):\n\n for i in range(0, len(self.tracker_object.tracks)):\n unormalized_uncertainty = np.sum(self.tracker_object.tracks[i].p_k_k.diagonal())\n self.uncertainty[i].append((1.0 / MAX_UNCERTAINTY) * unormalized_uncertainty)\n\n\n this_uncertainty = []\n [this_uncertainty.append(self.uncertainty[x][-1]) for x in range(0, len(self.tracker_object.tracks))]\n\n self.avg_uncertainty.append(np.mean(this_uncertainty))\n\n if len(self.avg_uncertainty) < window_size + window_lag:\n self.reward.append(0)\n else:\n current_avg = np.mean(self.avg_uncertainty[-window_size:])\n prev_avg = np.mean(self.avg_uncertainty[-(window_size + window_lag):-window_lag])\n if current_avg < prev_avg or self.avg_uncertainty[-1] < .1:\n # if current_avg < prev_avg:\n self.reward.append(1)\n else:\n self.reward.append(0)", "def discard_samples(chain_length):\n return min(chain_length / 10, MAX_GEN_DISCARD)", "def test_flmb(self):\n self.create_sample_data_set_dir(\"node10p1.dat\", TELEM_DIR, \"node59p1.dat\")\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,30)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,5,30)", "def downsample_spikes(S, thres=150, verbose=1):\n sum_S = np.sum(S, axis=0)\n if verbose > 0:\n print(\n 'Downsampling spike data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_S, thres)), thres))\n \n return S[:, np.greater(sum_S, thres)]", "def filter_samples(df, normal_samples, damaged_samples, assembly_samples, missing_samples, damaged_thread_samples,\n loosening_samples, move_samples):\n # Count the sample types\n count_df = df.groupby(['sample_nr'])['label'].median()\n unique, counts = np.unique(count_df, return_counts=True)\n labels_count_dict = {A: B for A, B in zip(unique, counts)}\n\n # Take only the amount of samples that's needed to fill the requirement\n sampled_list = []\n for label in labels_count_dict:\n subindex = list(np.unique(df.loc[df['label'] == label].index.get_level_values(0)))\n\n if label == 0:\n to_take = normal_samples * labels_count_dict[0]\n elif label == 1:\n to_take = damaged_samples * labels_count_dict[1]\n elif label == 2:\n to_take = assembly_samples * labels_count_dict[2]\n elif label == 3:\n to_take = missing_samples * labels_count_dict[3]\n elif label == 4:\n to_take = damaged_thread_samples * labels_count_dict[4]\n elif label == 5:\n to_take = loosening_samples * 
labels_count_dict[5]\n elif label == 6:\n to_take = move_samples * labels_count_dict[6]\n\n sample_ids = np.random.choice(subindex, int(to_take), replace=False)\n sampled_df = df[df.index.get_level_values(0).isin(sample_ids)]\n sampled_list.append(sampled_df)\n\n taken_data = pd.concat(sampled_list, ignore_index=False).sort_values(['sample_nr', 'event'])\n\n # Reset the sample numbers\n taken_data = taken_data.reset_index()\n taken_data['sample_nr'] = (taken_data['sample_nr'] != taken_data['sample_nr'].shift(1)).astype(int).cumsum()\n taken_data['event'] = taken_data.index\n taken_data = taken_data.set_index(['sample_nr', 'event'])\n taken_data = taken_data.reset_index('event', drop=True)\n taken_data = taken_data.set_index(taken_data.groupby(level=0).cumcount().rename('event'), append=True)\n taken_data = taken_data.sort_index()\n\n return taken_data", "def filter_by_freq(self, low=0.5, high=40):\n self.epochs.load_data()\n self.epochs.filter(l_freq=low, h_freq=high, picks = 'all')\n return self.epochs", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n 
axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def estimate_max_dn(exposure, gain=1):\n return np.random.randint(100*exposure, 500*exposure)", "def audioEpochFeats(cur,uid,timestamp):\n\tuidA = uid +'audio'\n\n\tvar_stats = []\n\tstd_stats = []\n\tnoise = []\n\tvoiceToSilenceRatio = []\n\n\tfor i in range(1,24):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour\n\t\the_timestamp = timestamp-86400+i*hour\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\tcur.execute('SELECT audio FROM {0} WHERE time_stamp >= {1} AND time_stamp<= {2}'\n\t\t\t\t.format(uidA,timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\t\t\trecords = cur.fetchall()\n\n\t\t\tvar_stats.append(np.var(records))\n\t\t\tstd_stats.append(np.std(records))\n\n\t\t\t# Calculating number of silence and voice/noise occurences\n\t\t\tsilence = len([item for item in records if item==0])\n\t\t\tvoice = len([item for item in records if item==1 or item==2])\n\t\t\tnoise.append(len([item for item in records if item==3]))\n\t\t\tif silence>0:\n\t\t\t\tvoiceToSilenceRatio.append(float(voice) / silence)\n\t\t\telse:\n\t\t\t\tvoiceToSilenceRatio.append(0)\n\treturn(np.nan_to_num(np.hstack((voiceToSilenceRatio,var_stats,std_stats,noise))))\n\t\"\"\"\ndef main():\n\tcon = psycopg2.connect(database='dataset', user='tabrianos')\n\tcur = con.cursor()\n\t#warnings.simplefilter(\"error\")\n\t#centers = np.load('visualizations/clustercenters.npy')\n\n# ------------TEST CASE-----------------------------\n\tfor loso in uids1:\n\t\tytest=[]\n\t\taccuracies =[]\n\t\tacc=0\n\t\tmaxminAcc =[]\n\t\tXbig = np.zeros([1,132])\t\n\t\tYbig = np.zeros([1])\n\t\tlabels=[]\n\t\tlabels.append(19)\n\t\t# loso means leave one student out: forest is trained on other users data\n\t\t# then tests are run on 'loso' student \n\t\tuids2.remove(loso)\n\t\tuids2.append(loso)\n\t\tprint('LOSO: {0}'.format(loso))\n\t\tfor testUser in uids2:\n\t\t\tprint(testUser)\n\t\t\t# lists that temporary store features before concatenation\n\t\t\t\n\t\t\tcolocationList =[]\n\t\t\tconversationList =[]\n\t\t\tactivityList=[]\n\t\t\taudioList = []\n\n\t\t\t# loading stress labels from 
database (currently on 0-5 scale)\n\t\t\trecords = loadSleepLabels(cur,testUser) \n\t\t\n\n\t\t\t\n\t\t\t#X,Y store initially the dataset and the labels accordingly\n\t\t\tY = np.zeros(len(records))\n\t\t\tX = np.array(records)\n\n\t\n\n\n\t\t\tfor i in range(0,len(records)):\n\t\t\t\tcolocationList.append( colocationEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tconversationList.append( convEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tactivityList.append(activityEpochFeats(cur,testUser,X[i][0]))\n\t\t\t#\tScreenList.append( screenStatFeatures(cur,testUser,X[i][0],day) )\n\t\t\t\taudioList.append(audioEpochFeats(cur,testUser,X[i][0]))\n\t\t\n\t\t\t\tif testUser==loso:\n\t\t\t\t\tytest.append(X[i][1])\n\t\t\t\t#labels list holds user ids to be used in LeaveOneOut pipeline\n\t\t\t\tlabels.append(testUser[-2:])\n\t\t\t\tY[i] = X[i][2]\n\n\t\t\t\n\t\t\t#concatenating features in one array \n\n\t\t\tXtt = np.concatenate((np.array(activityList),np.array(conversationList),np.array(colocationList),np.array(audioList)),axis=1)\n\t\t\tprint(Xtt.shape)\n\n\t\t\t#initiating and training forest, n_jobs indicates threads, -1 means all available\n\t\t\t# while the test student is not reached, training data are merged into one big matrix\n\t\t\tXbig = np.concatenate((Xbig,Xtt),axis=0)\n\t\t\tYbig = np.concatenate((Ybig,Y),axis=0)\n\n\t\t\tdel colocationList[:]\n\t\t\tdel conversationList[:]\n\t\t\tdel activityList[:]\n\t\t\tdel audioList[:]\n\n\n\n\t\t\tif testUser!=loso:\n\t\t\t\tXbig = Xbig.astype(np.float64)\n\t\t\t\tprint(Xbig.dtype)\n\t\t\t\t\n\n\t\t\t# when loso, tests are run\n\t\t\telif testUser==loso:\n\t\t\t\t#Xbig = preprocessing.scale(Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyX.npy',Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyY.npy',Ybig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyLOO.npy',np.array(labels))\n\t\t\t\tprint(Xbig.shape[0],Ybig.shape[0],len(labels))\n\t\t\t\tprint('train matrix saved')\n\t\t\t\ta = raw_input()\n\t\t\t\tforest = RandomForestClassifier(n_estimators=100, n_jobs = -1)\n\t\t\t\tforest.fit(Xbig,Ybig)\n\t\t\t\tef = forest.score(Xtt,ytest)\n\t\t\t\tprint(ef*100)\n\n\t\t\t\toutput = np.array(forest.predict(Xtt))\n\t\t\t\tscored = output - np.array(ytest)\n\n\t\t\t\t# Counting as correct predictions the ones which fall in +/-1, not only exact\n\t\t\t\t# I call it the 'Tolerance technique'\n\t\t\t\tcorrect=0\n\t\t\t\tc = Counter(scored)\n\t\t\t\tfor k in c.keys():\n\t\t\t\t\tif k<2 and k>-2:\n\t\t\t\t\t\tcorrect += c[k]\n\t\t\t\t\n\t\t\t\tscore = float(correct)/len(scored)\n\t\t\t\tprint(score*100)\n\n\n\n\t\tprint(Xbig.shape)\n\t\n\t\t\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\t\"\"\"", "def n_remaining_samples(self):\n return -1", "def n_remaining_samples(self):\n return -1", "def n_remaining_samples(self):\n return -1", "def subbandwidth(self):", "def oversampling_experiment():\n model, history = train.train(BATCH_SIZE, EPOCHS, print_model_summary=True,\n oversampling=True)\n evaluate_both(model)\n plotting.plot_metrics(history)", "def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. 
Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))", "def sample_low_rank(self, n_samples, mu, logvar, F):\n #F = torch.unsqueeze(F, dim=1).repeat(1, n_samples, 1, 1) # [self.batch_size, n_samples, self.Y_dim, self.rank]\n F = F.repeat(n_samples, 1, 1) # [self.batch_size*n_samples, self.Y_dim, self.rank]\n mu = mu.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n logvar = logvar.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n eps_low_rank = torch.randn(self.batch_size*n_samples, self.rank, 1)\n eps_diag = torch.randn(self.batch_size*n_samples, self.Y_dim)\n half_var = torch.exp(0.5*logvar) # [self.batch_size*n_samples, self.Y_dim]\n samples = torch.bmm(F, eps_low_rank).squeeze() + mu + half_var*eps_diag\n samples = samples.reshape(n_samples, self.batch_size, self.Y_dim)\n samples = samples.transpose(0, 1)\n samples = self.unwhiten_back(samples)\n samples = samples.data.cpu().numpy()\n return samples", "def get_threshold(ckt_path, threshold_nums, percentage):\n aug_classes = 5\n num_classes = 8\n torch.set_printoptions(precision=2, threshold=100000, linewidth=10000)\n\n # get dataloader\n mean_std_path = './data/mean_std.json'\n data_root = './data/'\n # loader dict:'train','valid', 'test'\n loader = get_dataloader(mean_std_path, data_root)\n\n copy_resnet18 = deepcopy(resnet18(pretrained=False))\n # model = Net1FC(copy_resnet18, all_classes).cuda()\n model = Net8FC(copy_resnet18, num_classes, aug_classes).cuda()\n\n # ckt_path = './backup/models/resnet180.09625'\n ckt = torch.load(ckt_path)\n model.load_state_dict(ckt['model'])\n\n model.eval()\n\n loss_list = [[] for i in range(8)]\n # _pred_list = []\n # _label_list = []\n with torch.no_grad():\n for index, (data, label) in tqdm(enumerate(loader['threshold'])):\n # _label_list.append(int(label))\n labels = torch.tensor([label for i in range(4)]).cuda()\n data = data.squeeze(1).cuda()\n data = torch.stack([data.clone(),\n data.clone().rot90(1, [1, 2]),\n data.clone().rot90(2, [1, 2]),\n data.clone().rot90(3, [1, 2])])\n\n output = model(data, labels, \"valid\")\n\n targets = torch.tensor([0, 1, 2, 3]).cuda()\n loss_list[label].append(cross_entropy(output[label], targets).item() / 4.0)\n\n # pred_label = np.argmin(val_loss)\n # _pred_list.append(int(pred_label))\n\n # val_conf_mat = conf_matrix(_pred_list, _label_list, 8, True, [i for i in range(8)])\n # cal_recall_precision(val_conf_mat, True, [i for i in range(8)])\n\n print(loss_list)\n\n threshold = []\n if threshold_nums == 'multi':\n # 若各分类器求一个阈值\n for i in range(8):\n length = len(loss_list[i])\n 
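# A minimal working sketch of the thresholding idea applied just below: use the mean
# of the smallest `percentage` fraction of the collected losses as the decision
# threshold. Note that list.sort() sorts in place and returns None, so a sorted copy
# (np.sort / sorted) is needed before slicing, and the slice bound must be an int.
import numpy as np

def loss_threshold(losses, percentage=0.9):
    """Mean of the lowest `percentage` fraction of `losses` (0 < percentage <= 1 assumed)."""
    ordered = np.sort(np.asarray(losses, dtype=float))
    k = max(1, int(len(ordered) * percentage))
    return float(ordered[:k].mean())

# Example with synthetic per-sample losses
rng = np.random.default_rng(0)
print(loss_threshold(rng.exponential(scale=1.0, size=200), percentage=0.9))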
threshold.append(np.mean(loss_list[i].sort()[:length * percentage]))\n\n elif threshold_nums == 1:\n # 若所有分类器求一个阈值\n loss_list_in_one = []\n for loss in loss_list:\n loss_list_in_one.extend(loss)\n length = len(loss_list_in_one)\n threshold = np.mean(loss_list_in_one.sort()[:length * percentage])\n\n print(\"The threshold is:\", threshold)\n\n return threshold", "def test_large_import_recovered(self):\n self.create_sample_data_set_dir(\"DOS15908.DAT\", RECOV_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_RECOVERED,96,400)", "def experiment1_outliers():\n\tdata_folder = \"ckan_subset/prepared_learnset/\"\n\ttest_folder = 'ckan_subset/testset/xml_csv/'\n\tgm = Graph_Maker()\n\tgm.store()\n\trounds = 5\n\tx = [\"Fingerprint\", \"Syntax Feature Model\", \"Word2Vec Matcher\"]\n\t\n\tnumber_of_classes = 15\n\texamples_per_class = 0\n\taccuracies = []\n\tprecisions = []\n\trecalls = []\n\tfmeasures = []\n\tsf_main = Storage_Files(data_folder, classes)\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\n\tfor i in range(0, rounds):\n\t\tprint(\"Fingerprint\")\n\t\t# --- Fingerprint\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Fingerprint', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Fingerprint_Matcher', {'feature_main': 'fingerprint'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\t\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"SFM\")\n\t\t# --- Syntax Feature Model\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Syntax_Feature_Model', [sf_main, 1, 0, False, False])\n\n\t\tccc.add_matcher('matcher', 'Syntax_Matcher', {'feature_main': 'syntax'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = 
list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"W2V\")\n\t\t# --- Word2Vec Matcher\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Corpus', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Word2Vec_Matcher', {'feature_main': 'corpus'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\tgm.add_x(x)\n\t# accuracies = [0.4, 0.4, 0.4]\n\t# precisions = [0.5, 0.5, 0.5]\n\t# recalls = [0.62, 0.62, 0.62]\n\t# fmeasures = [0.23, 0.23, 0.28]\n\tgm.append_y(accuracies)\n\tgm.append_y(precisions)\n\tgm.append_y(recalls)\n\tgm.append_y(fmeasures)\n\tgm.store()\n\tsubtitle = \"Scores were averaged over \" + str(rounds) + \" tests with \" + str(len(classes)) + \" classes. 
\" + \\\n\t\"Number of simulated columns per class: \" + str(number_of_classes)\n\tlabels = [\"Accuracy\", \"Precision\", \"Recall\", \"F-Measure\"]\n\tgm.plot_bar_n(\"Matcher Type\", \"Score\", \"Accuracy of Matchers\", labels, subtitle=subtitle)", "def remove_low_info(X, max_frequency=0.99):\n selector = UniqueThreshold(max_frequency=max_frequency)\n return selector.fit_transform(X)", "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify 
that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))", "def make_downsample_filt_tensor(SR=16000, ENV_SR=200, WINDOW_SIZE=1001, pycoch_downsamp=False):\n DOWNSAMPLE = SR/ENV_SR\n if not pycoch_downsamp: \n downsample_filter_times = np.arange(-WINDOW_SIZE/2,int(WINDOW_SIZE/2))\n downsample_filter_response_orig = np.sinc(downsample_filter_times/DOWNSAMPLE)/DOWNSAMPLE\n downsample_filter_window = signal.kaiser(WINDOW_SIZE, 5)\n downsample_filter_response = downsample_filter_window * downsample_filter_response_orig\n else: \n max_rate = DOWNSAMPLE\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for our sinc-like function\n if max_rate!=1: \n downsample_filter_response = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0))\n else: # just in case we aren't downsampling -- I think this should work? 
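# A self-contained sketch of the windowed-sinc low-pass built in this routine, assuming
# only NumPy/SciPy: the cutoff is the envelope Nyquist (ENV_SR / 2), realised either as
# an explicit sinc tapered by a Kaiser window or via scipy.signal.firwin.
import numpy as np
from scipy.signal import firwin

def downsample_filter(sr=16000, env_sr=200, window_size=1001, beta=5.0):
    """FIR taps that low-pass at env_sr/2 before decimating by sr/env_sr."""
    decim = sr / env_sr
    n = np.arange(-(window_size // 2), window_size // 2 + window_size % 2)
    sinc_lp = np.sinc(n / decim) / decim        # ideal low-pass scaled by 1/decim
    return sinc_lp * np.kaiser(window_size, beta)

taps_manual = downsample_filter()
# Same cutoff expressed relative to Nyquist for firwin (100 Hz / 8000 Hz = 0.0125):
taps_firwin = firwin(1001, (200 / 2) / (16000 / 2), window=("kaiser", 5.0))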
\n downsample_filter_response = zeros(2 * half_len + 1)\n downsample_filter_response[half_len + 1] = 1\n \n # Zero-pad our filter to put the output samples at the center\n # n_pre_pad = int((DOWNSAMPLE - half_len % DOWNSAMPLE))\n # n_post_pad = 0\n # n_pre_remove = (half_len + n_pre_pad) // DOWNSAMPLE\n # We should rarely need to do this given our filter lengths...\n # while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],\n # up, down) < n_out + n_pre_remove:\n # n_post_pad += 1\n # downsample_filter_response = np.concatenate((np.zeros(n_pre_pad), downsample_filter_response, np.zeros(n_post_pad)))\n \n downsample_filt_tensor = tf.constant(downsample_filter_response, tf.float32)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 0)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 2)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 3)\n\n return downsample_filt_tensor", "def test_max_samples(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"max_samples\": 3,\n \"min_gradient\": -1,\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n for val in [0, 1, 2, 3, 2, 1]:\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"on\"\n assert state.attributes[\"sample_count\"] == 3", "def __init__(self, \n sampling_rate,\n hop_size,\n fft_size,\n f0_floor,\n f0_ceil,\n uv_threshold=0,\n q1=-0.15):\n super(SourceLoss, self).__init__()\n\n self.cheaptrick = CheapTrick(sampling_rate=sampling_rate,\n hop_size=hop_size, \n fft_size=fft_size,\n f0_floor=f0_floor,\n f0_ceil=f0_ceil,\n uv_threshold=uv_threshold,\n q1=q1)\n self.loss = nn.MSELoss()", "def preval_forward(self, data_shot, label_shot, data_query):\n embedding_query = self.encoder(data_query)\n embedding_shot = self.encoder(data_shot)\n logits = self.base_learner(embedding_shot)\n #loss = self.FL(logits, label_shot) + self.CD(logits,label_shot) + self.LS(logits,label_shot)\n loss = self.CD(logits,label_shot)\n grad = torch.autograd.grad(loss, self.base_learner.parameters())\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))\n logits_q = self.base_learner(embedding_query, fast_weights)\n\n for _ in range(1, 100):\n logits = self.base_learner(embedding_shot, fast_weights)\n #loss = self.FL(logits, label_shot) + self.CD(logits,label_shot) + self.LS(logits,label_shot)\n loss = self.CD(logits,label_shot)\n grad = torch.autograd.grad(loss, fast_weights)\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))\n logits_q = self.base_learner(embedding_query, fast_weights) \n return logits_q", "def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute 
\"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits", "def majorityVoteSilence(y_Raw, amps, silenceClassNum):\n y_raw = y_Raw.copy()\n silenceThreshold = 1000\n majVotWindowLength = 2.0 #in seconds\n windowLength = 0.032\n frameLengthFloat = math.ceil(majVotWindowLength/windowLength)\n\n frameLength = int(frameLengthFloat)\n\n resArray = np.empty(y_raw.shape)\n\n n_frames = int(math.ceil(y_raw.shape[0]/frameLengthFloat))\n\n for i in range(n_frames):\n\n if ((i+1) * frameLength) < y_raw.shape[0]:\n\n tmpAmps = amps[(i * frameLength):(((i+1) * frameLength))]\n \n if tmpAmps.max() >= silenceThreshold:\n #if True:\n tmpArray = y_raw[(i * 
frameLength):(((i+1) * frameLength))]\n \n \"\"\" Get most frequent number in that frames: \"\"\"\n count = np.bincount(tmpArray)\n tmpMostFrequent = np.argmax(count)\n\n \"\"\" Fill all elements with most frequent number: \"\"\"\n tmpArray.fill(tmpMostFrequent)\n\n \"\"\" Write it into our result array: \"\"\"\n resArray[(i * frameLength):(((i+1) * frameLength))] = tmpArray\n \n else:\n \"\"\"If all amplitudes are below threshold, the \n sample is considered silent:\"\"\" \n resArray[(i * frameLength):(((i+1) * frameLength))] = silenceClassNum\n else:\n\n tmpAmps = amps[(i * frameLength):y_raw.shape[0]]\n\n\n if tmpAmps.max() >= silenceThreshold: \n #if True:\n tmpArray = y_raw[(i * frameLength):y_raw.shape[0]]\n \"\"\" Get most frequent number in that frames and fill \n all elements in the frame with it: \"\"\"\n count = np.bincount(tmpArray)\n tmpMostFrequent = np.argmax(count)\n\n \"\"\" Fill all elements with most frequent number: \"\"\"\n tmpArray.fill(tmpMostFrequent)\n\n \"\"\" Write it into our result array: \"\"\"\n resArray[(i * frameLength):y_raw.shape[0]] = tmpArray\n \n else:\n \"\"\"If all amplitudes are below threshold, the \n sample is considered silent:\"\"\" \n resArray[(i * frameLength):y_raw.shape[0]] = silenceClassNum\n\n return resArray", "def extract_features(\n fp, sample_rate, window_length, hop_length, n_mel, new_img_size, low_cut, high_cut\n):\n y, sr = librosa.load(fp, sr=args.sample_rate)\n y_filtered = butter_bandpass_filter(y, low_cut, high_cut, sr)\n melspectrogram_db = compute_melspectrogram_with_fixed_size(\n y_filtered, sample_rate, window_length, hop_length, n_mel, new_img_size\n )\n return melspectrogram_db", "def samples(self):\n pass", "def train_on_chunk(self, chunk, meter):\n # EXTRACT FEATURES:\n # find units:\n self.__setattr__('units', chunk.columns[0])\n # Loading treshold for getting events:\n thDelta = getattr(self, 'thDelta')\n chunk.index.name = 'date_time'\n # To prevent learning many samples at the middle of a edge:\n chunk.ix[:, 0][chunk.ix[:, 0] < thDelta] = 0\n # Learning edges\n chunk['delta'] = chunk.ix[:, 0].diff()\n chunk.delta.fillna(0, inplace=True)\n edges = chunk[np.abs(chunk['delta']) > thDelta].delta\n # Pairing on/off events\n #print(chunk)\n if len(edges) > 1:\n offpower = edges[edges.apply(np.sign).diff() == -2]\n onpower = edges[edges.apply(np.sign).diff(-1) == 2]\n duration = offpower.reset_index().date_time - \\\n onpower.reset_index().date_time\n duration = duration.astype('timedelta64[s]')\n\n # Set consistent index for concatenation:\n onpower = pd.DataFrame(onpower).reset_index(drop=True)\n onpower.columns = ['onpower']\n offpower = pd.DataFrame(offpower).reset_index(drop=True)\n offpower.columns = ['offpower']\n duration = pd.DataFrame(duration).reset_index(drop=True)\n duration.columns = ['duration']\n\n # Len of samples:\n print(\"Samples of onpower: \" + str(len(onpower)))\n print(\"Samples of offpower: \" + str(len(offpower)))\n print(\"Samples of duration: \" + str(len(duration)))\n\n number_of_events = len(onpower)\n # Features (concatenation)\n self.onpower_train = pd.concat(\n [self.onpower_train, onpower]).reset_index(drop=True)\n self.offpower_train = pd.concat(\n [self.offpower_train, offpower]).reset_index(drop=True)\n self.duration_train = pd.concat(\n [self.duration_train, duration]).reset_index(drop=True)\n \n else:\n number_of_events = 0\n print(\"\"\"WARNING: No paired events found on this chunk.\n Is it thDelta too high?\"\"\")\n \n self.duration_train = 
self.duration_train[self.duration_train.duration<400]\n\n # RE-TRAIN FEATURE MODELS:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)\n\n # UPDATE STATS:\n stat_dict = {'appliance': meter.identifier[\n 0], 'instance': meter.identifier[1], 'Nevents': number_of_events}\n instanceFound = False\n if len(self.stats) == 0:\n self.stats.append(stat_dict)\n else:\n for stat in self.stats:\n if ((stat['appliance'] == stat_dict['appliance']) and\n (stat['instance'] == stat_dict['instance'])):\n index = self.stats.index(stat)\n self.stats[index]['Nevents'] = self.stats[\n index]['Nevents'] + number_of_events\n instanceFound = True\n if not instanceFound:\n self.stats.append(stat_dict)", "def stop_on_low_ais_ess(trial_id, result):\n return result[\"ais_effective_sample_size\"] < 0.1", "def flag_samples(self,counts):\n counts = self.fov_qc(counts)\n counts = self.binding_density_qc(counts)\n counts = self.pos_control_linearity_qc(counts)\n counts = self.pos_control_detection_limit_qc(counts)\n return(counts)", "def minimum_samples(cls):\n return DEFAULT_MINIMUM_SAMPLE_COUNT", "def random_undersampling(dataset):\n\n\tminority_set = dataset[dataset.Trend == -1.0]\n\tmajority_set = dataset[dataset.Trend == 1.0]\n\n\t# print(dataset.Trend.value_counts())\n\n\t# If minority set larger than majority set, swap\n\tif len(minority_set) > len(majority_set):\n\t\tminority_set, majority_set = majority_set, minority_set\n\n\t# Downsample majority class\n\tmajority_downsampled = resample(majority_set,\n\t replace=False, # sample without replacement\n\t n_samples=len(minority_set), # to match minority class\n\t random_state=123) # reproducible results\n\n\t# Combine minority class with downsampled majority class\n\treturn pd.concat([majority_downsampled, minority_set])", "def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):\n self.max_age = max_age\n self.min_hits = min_hits\n self.iou_threshold = iou_threshold\n self.trackers = []\n self.frame_count = 0", "def _sample_trend_uncertainty(\n prophet_model: Prophet,\n n_samples: int,\n df: pd.DataFrame,\n iteration: int = 0,\n) -> np.ndarray:\n\n # when there is only historical data\n # given that df is sorted by time, it's last item has the largest date.\n if df[\"t\"].iloc[-1] <= 1:\n # there is no trend uncertainty in historic trends\n uncertainties = np.zeros((n_samples, len(df)))\n else:\n\n future_df = df.loc[df[\"t\"] > 1]\n n_length = len(future_df)\n hist_len = len(df) - n_length\n # handle 1 length futures by using history\n if n_length > 1:\n single_diff = np.diff(future_df[\"t\"]).mean()\n else:\n single_diff = np.diff(prophet_model.history[\"t\"]).mean()\n change_likelihood = len(prophet_model.changepoints_t) * single_diff\n deltas = prophet_model.params[\"delta\"][iteration]\n m0 = prophet_model.params[\"m\"][iteration]\n k = prophet_model.params[\"k\"][iteration]\n mean_delta = np.mean(np.abs(deltas)) + 1e-8\n if prophet_model.growth == \"linear\":\n mat = _make_trend_shift_matrix(\n mean_delta, change_likelihood, n_length, n_samples=n_samples\n )\n uncertainties = mat.cumsum(axis=1).cumsum(\n axis=1\n ) # from slope changes to actual values\n uncertainties *= single_diff # scaled by the actual meaning of the slope\n elif prophet_model.growth == \"logistic\":\n mat = _make_trend_shift_matrix(\n mean_delta, change_likelihood, n_length, n_samples=n_samples\n )\n uncertainties = _logistic_uncertainty(\n prophet_model=prophet_model,\n 
mat=mat,\n deltas=deltas,\n k=k,\n m=m0,\n cap=future_df[\"cap_scaled\"].values,\n t_time=future_df[\"t\"].values,\n n_length=n_length,\n single_diff=single_diff,\n )\n elif prophet_model.growth == \"flat\":\n # no trend uncertainty when there is no growth\n uncertainties = np.zeros((n_samples, n_length))\n else:\n raise NotImplementedError\n # historical part\n if hist_len > 0:\n past_uncertainty = np.zeros((n_samples, hist_len))\n uncertainties = np.concatenate([past_uncertainty, uncertainties], axis=1)\n return uncertainties", "def loudness_normalization(samples: tf.Tensor,\n target_db: float = 15.0,\n max_gain_db: float = 30.0):\n std = tf.math.reduce_std(samples) + 1e-9\n gain = tf.minimum(db_to_linear(max_gain_db), db_to_linear(target_db) / std)\n return gain * samples", "def get_sensor_bool_dryspot_runlevel(self, filename, threshold_min_counted_dryspots=5):\n f = h5py.File(filename, \"r\")\n meta_file = h5py.File(str(filename).replace(\"RESULT.erfh5\", \"meta_data.hdf5\"), 'r')\n\n try:\n single_states, set_of_states, useless_states = self.__get_dryspot_data(f, meta_file)\n multi_states = self.__get_pressure_data(f)\n multi_states = multi_states.squeeze()\n\n activated_sensors = np.count_nonzero(multi_states, axis=1)\n percentage_of_all_sensors = activated_sensors / 1140\n len_wanted_seq = 100\n current = 0\n sequence = np.zeros((len_wanted_seq, self.num_sensors))\n frame_labels = []\n\n if self.aux_info:\n original_frame_idxs = np.full(len_wanted_seq, np.nan, np.int16)\n frame_labels_aux = np.full(len_wanted_seq, np.nan, np.int8)\n sample_percentages = np.full(len_wanted_seq, np.nan)\n single_state_indices = np.full(len_wanted_seq, np.nan, np.int16)\n # flowfronts = np.zeros((len_wanted_seq, self.image_size[0], self.image_size[1]))\n # _coords, flat_fillings = self.__get_filling_data(f, single_states)\n\n for i, sample in enumerate(single_states):\n state_num = int(str(sample).replace(\"state\", \"0\"))\n try:\n sample_percentage = percentage_of_all_sensors[state_num - 1]\n if sample_percentage >= current / len_wanted_seq:\n data = multi_states[state_num - 1, :]\n data = np.log(np.add(data, 1)) # TODO make log optional\n if self.sensor_indizes != ((0, 1), (0, 1)):\n rect = data.reshape(38, 30)\n sel = rect[self.sensor_indizes[0][0]::self.sensor_indizes[0][1],\n self.sensor_indizes[1][0]::self.sensor_indizes[1][1]]\n data = sel.flatten()\n sequence[current, :] = data\n\n frame_label = 0\n if state_num in set_of_states:\n frame_label = 1\n frame_labels.append(frame_label)\n\n if self.aux_info:\n original_frame_idxs[current] = state_num\n frame_labels_aux[current] = frame_label\n sample_percentages[current] = sample_percentage\n single_state_indices[current] = i\n # flowfronts[current, :, :] = create_np_image(target_shape=self.image_size,\n # norm_coords=_coords, data=flat_fillings[i])\n current += 1\n except IndexError:\n continue\n\n # determine runlevel label using frame labels and threshold\n lens_of_runs_of_dryspots = [sum(1 for _ in group) for key, group in\n groupby(np.array(frame_labels) == 1) if key]\n max_len = 0 if len(lens_of_runs_of_dryspots) == 0 else max(lens_of_runs_of_dryspots)\n label = 0 if max_len < threshold_min_counted_dryspots else 1\n\n f.close()\n meta_file.close()\n\n if self.aux_info:\n # framelabels, original_frame_idx, original_num_frames, flowfronts, filling_percentage\n aux = {\"framelabel\": frame_labels_aux,\n \"original_frame_idx\": original_frame_idxs,\n \"original_num_multi_states\": len(multi_states),\n \"percent_of_sensors_filled\": 
sample_percentages,\n \"single_state_indices\": single_state_indices,\n }\n return [(sequence, label, aux)]\n\n return [(sequence, label)]\n except KeyError:\n f.close()\n meta_file.close()\n return None", "def test_large_import(self):\n self.create_sample_data_set_dir(\"node59p1.dat\", TELEM_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,750,400)", "def resample(generator, threshold=0.8):\n windows = (generator.step_size, )\n feature = _MeanFeature(windows)\n values = next(extract_features([feature], generator, n_jobs=1)).vector\n values.shape = (values.shape[0], values.shape[1])\n return values > threshold", "def timbral_warmth(fname, dev_output=False, phase_correction=False, clip_output=False, max_FFT_frame_size=8192,\n max_WR = 12000, fs=0):\n '''\n Read input\n '''\n audio_samples, fs = timbral_util.file_read(fname, fs, phase_correction=phase_correction)\n\n # get the weighted high frequency content\n mean_wr, _, _, weighted_hf = warm_region_cal(audio_samples, fs)\n\n # calculate the onsets\n envelope = timbral_util.sample_and_hold_envelope_calculation(audio_samples, fs, decay_time=0.1)\n envelope_time = np.arange(len(envelope)) / float(fs)\n\n # calculate the onsets\n nperseg = 4096\n original_onsets = timbral_util.calculate_onsets(audio_samples, envelope, fs, nperseg=nperseg)\n # If onsets don't exist, set it to time zero\n if not original_onsets:\n original_onsets = [0]\n # set to start of file in the case where there is only one onset\n if len(original_onsets) == 1:\n original_onsets = [0]\n '''\n Initialise lists for storing features\n '''\n # set defaults for holding\n all_rms = []\n all_ratio = []\n all_SC = []\n all_WR_Ratio = []\n all_decay_score = []\n\n\n # calculate metrics for each onset\n for idx, onset in enumerate(original_onsets):\n if onset == original_onsets[-1]:\n # this is the last onset\n segment = audio_samples[onset:]\n else:\n segment = audio_samples[onset:original_onsets[idx+1]]\n\n segment_rms = np.sqrt(np.mean(segment * segment))\n all_rms.append(segment_rms)\n\n # get FFT of signal\n segment_length = len(segment)\n if segment_length < max_FFT_frame_size:\n freq, time, spec = spectrogram(segment, fs, nperseg=segment_length, nfft=max_FFT_frame_size)\n else:\n freq, time, spec = spectrogram(segment, fs, nperseg=max_FFT_frame_size, nfft=max_FFT_frame_size)\n\n # flatten the audio to 1 dimension. 
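# A brief sketch of two quantities computed further down in this routine, written
# against a plain magnitude spectrum; `freq` and `spec` are assumed to be matching
# 1-D arrays (Hz and linear magnitude).
import numpy as np

def spectral_centroid(freq, spec):
    """Magnitude-weighted mean frequency of the spectrum."""
    spec = np.abs(np.asarray(spec, dtype=float))
    return float(np.sum(freq * spec) / np.sum(spec))

def band_energy_ratio(freq, spec, f_low, f_high):
    """Fraction of total spectral energy between f_low and f_high (e.g. fundamental..260 Hz)."""
    spec = np.abs(np.asarray(spec, dtype=float))
    band = (freq >= f_low) & (freq < f_high)
    return float(spec[band].sum() / spec.sum())

# Example on a toy spectrum
freq = np.linspace(0.0, 8000.0, 4097)
spec = np.exp(-freq / 1500.0)
print(spectral_centroid(freq, spec), band_energy_ratio(freq, spec, 80.0, 260.0))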
Catches some strange errors that cause crashes\n if spec.shape[1] > 1:\n spec = np.sum(spec, axis=1)\n spec = spec.flatten()\n\n # normalise for this onset\n spec = np.array(list(spec)).flatten()\n this_shape = spec.shape\n spec /= max(abs(spec))\n\n '''\n Estimate of fundamental frequency\n '''\n # peak picking algorithm\n peak_idx, peak_value, peak_x = timbral_util.detect_peaks(spec, freq=freq, fs=fs)\n # find lowest peak\n fundamental = np.min(peak_x)\n fundamental_idx = np.min(peak_idx)\n\n '''\n Warmth region calculation\n '''\n # estimate the Warmth region\n WR_upper_f_limit = fundamental * 3.5\n if WR_upper_f_limit > max_WR:\n WR_upper_f_limit = 12000\n tpower = np.sum(spec)\n WR_upper_f_limit_idx = int(np.where(freq > WR_upper_f_limit)[0][0])\n\n if fundamental < 260:\n # find frequency bin closest to 260Hz\n top_level_idx = int(np.where(freq > 260)[0][0])\n # sum energy up to this bin\n low_energy = np.sum(spec[fundamental_idx:top_level_idx])\n # sum all energy\n tpower = np.sum(spec)\n # take ratio\n ratio = low_energy / float(tpower)\n else:\n # make exception where fundamental is greater than\n ratio = 0\n\n all_ratio.append(ratio)\n\n '''\n Spectral centroid of the segment\n '''\n # spectral centroid\n top = np.sum(freq * spec)\n bottom = float(np.sum(spec))\n SC = np.sum(freq * spec) / float(np.sum(spec))\n all_SC.append(SC)\n\n '''\n HF decay\n - linear regression of the values above the warmth region\n '''\n above_WR_spec = np.log10(spec[WR_upper_f_limit_idx:])\n above_WR_freq = np.log10(freq[WR_upper_f_limit_idx:])\n np.ones_like(above_WR_freq)\n metrics = np.array([above_WR_freq, np.ones_like(above_WR_freq)])\n\n # create a linear regression model\n model = linear_model.LinearRegression(fit_intercept=False)\n model.fit(metrics.transpose(), above_WR_spec)\n decay_score = model.score(metrics.transpose(), above_WR_spec)\n all_decay_score.append(decay_score)\n\n\n '''\n get mean values\n '''\n mean_SC = np.log10(np.mean(all_SC))\n mean_decay_score = np.mean(all_decay_score)\n weighted_mean_ratio = np.average(all_ratio, weights=all_rms)\n\n if dev_output:\n return mean_SC, weighted_hf, mean_wr, mean_decay_score, weighted_mean_ratio\n else:\n\n '''\n Apply regression model\n '''\n all_metrics = np.ones(6)\n all_metrics[0] = mean_SC\n all_metrics[1] = weighted_hf\n all_metrics[2] = mean_wr\n all_metrics[3] = mean_decay_score\n all_metrics[4] = weighted_mean_ratio\n\n coefficients = np.array([-4.464258317026696,\n -0.08819320850778556,\n 0.29156539973575546,\n 17.274733561081554,\n 8.403340066029507,\n 45.21212125085579])\n\n warmth = np.sum(all_metrics * coefficients)\n\n # clip output between 0 and 100\n if clip_output:\n warmth = timbral_util.output_clip(warmth)\n\n return warmth", "def decreaseSearch(self):\n\n self.detections = self.detections()\n self.detections.location = self.location\n for index, row in enumerate(self.magdata):\n\n if row[3] < self.upper*self.background and row[3] < self.background - 2*self.std:\n # less than 50% of background and 2 std\n #print \"Large decrease (less than \" + str(self.upper*self.background) + \")\", row[3]\n self.detections.largeDecrease.append([index,self.timestamps[index], row[3]])\n \n elif row[3] < self.lower*self.background and row[3] < self.background - self.std:\n # less than 25% of background and 1 std\n #print \"Decrease (less than \" + str(self.lower*self.background) + \")\", row[3]\n self.detections.smallDecrease.append([index,self.timestamps[index], row[3]])\n \n elif row[3] > (1 + self.upper)*self.background and row[3] > 
self.background + 2*self.std:\n # greater than 50% of background\n #print \"Large increase (greater than \" + str((1 + self.upper)*self.background) + \")\", row[3]\n self.detections.largeIncrease.append([index,self.timestamps[index], row[3]])\n \n elif row[3] > (1 + self.lower)*self.background and row[3] > self.background + self.std:\n # greater than 25% of background\n #print \"Increase (greater than \" + str((1 + self.lower)*self.background) + \")\", row[3]\n self.detections.smallIncrease.append([index,self.timestamps[index], row[3]])\n\n \n if resultCounter(self.detections) == 0:\n self.detections.results.append(None)\n print \"No observable jumps/dips in magnetic field strength\"\n else:\n for attribute, bins in classIterator(self.detections):\n if 'crease' in attribute:\n self.detections.results.append('Number of ' + attribute + ': ' + str(len(bins)))", "def has_more_samples(self):\n return True", "def has_more_samples(self):\n return True", "def has_more_samples(self):\n return True", "def extract_band_power_per_epoch(band_power_df: pd.DataFrame,\n epoch_len: float=pysleep_defaults.epoch_len) -> pd.DataFrame:\n band_power_df['onset'] = band_power_df['onset'].apply(lambda x: pd.Timedelta(seconds=x))\n band_power_df = band_power_df.drop('duration', axis=1).set_index('onset')\n resampled_df = band_power_df.groupby(['chan', 'band']).resample(rule=str(epoch_len)+'s').mean().reset_index()\n resampled_df['onset'] = resampled_df['onset'].apply(lambda x: x.seconds)\n resampled_df['duration'] = epoch_len\n return resampled_df", "def prior_sample(self):\n pass", "def filter_negative(solver, sub_u_rate, threshold):\n classifier = joblib.load(join(join(_result_path, solver), str(sub_u_rate)) + '/logistic.pkl')\n\n sub_u = np.load(\"./processed_data/train/sub_u_\" + str(sub_u_rate) + \".npy\")\n sub_u_x = sub_u[:, :-1]\n result_sub_u = np.array(classifier.predict_proba(sub_u_x)[:, 1])\n\n sub_u_negative = sub_u[np.where(result_sub_u <= threshold)]\n print(sub_u_negative.shape)\n sub_u_negative_x = sub_u_negative[:, :-1]\n result_sub_u_negative = np.array(classifier.predict_proba(sub_u_negative_x)[:, 1])\n print(result_sub_u_negative.max())\n np.save(\"./processed_data/train/sub_u_negative.npy\", sub_u_negative)\n\n print(\"\\n\\n\\n\")\n unlabeled = np.load(\"./processed_data/train/raw/train_u.npy\")\n unlabeled_x = unlabeled[:, :-1]\n result_unlabeled = np.array(classifier.predict_proba(unlabeled_x)[:, 1])\n unlabeled_negative = unlabeled[np.where(result_unlabeled <= threshold)]\n print(unlabeled_negative.shape)\n result_unlabeled_negative = np.array(classifier.predict_proba(unlabeled_negative[:, :-1])[:, 1])\n print(result_unlabeled_negative.max())\n np.save(\"./processed_data/train/unlabeled_negative.npy\", unlabeled_negative)", "def over_sample(self) -> float:\n return self._over_sample", "def over_sample(self) -> float:\n return self._over_sample", "def measureUnfoldedLevel(ds, verbose = False):\n points = getIndexedTraces(ds)\n from sklearn.cluster import KMeans\n x = points[points[:,0] > 150, 1].reshape((-1,1))\n # remove outliers \n std = np.std(x)\n mean = np.mean(x)\n x = x[x > mean - 4*std].reshape((-1,1)) \n # ML clustering\n kmeans = KMeans(n_clusters=3, random_state=0).fit(x)\n x_cluster = kmeans.predict(x)\n means = [ np.mean(x[x_cluster == i]) for i in range(3)]\n means = sorted(means) \n level_one = means[1]\n if np.abs(level_one) > 0.35 or np.abs(level_one) < 0.1:\n print(\"Warning! 
Unfolded level detector in unexpected range: \",leven_one)\n if verbose: #feedback\n pyplot.figure()\n pyplot.hist2d(points[:,0], points[:,1], \n bins=(70*2, 50*2),\n range = [[0, 700], [-0.45, 0.05]],\n cmax = 100000/4 # clip max\n )\n pyplot.plot([0,700], [level_one]*2, 'r--')\n return level_one", "def make_downsample_filt_tensor(SR=16000, ENV_SR=200, WINDOW_SIZE=1001, beta=5.0, pycoch_downsamp=False):\n DOWNSAMPLE = SR/ENV_SR\n if not pycoch_downsamp: \n downsample_filter_times = np.arange(-WINDOW_SIZE/2,int(WINDOW_SIZE/2))\n downsample_filter_response_orig = np.sinc(downsample_filter_times/DOWNSAMPLE)/DOWNSAMPLE\n downsample_filter_window = signallib.kaiser(WINDOW_SIZE, beta)\n downsample_filter_response = downsample_filter_window * downsample_filter_response_orig\n else: \n max_rate = DOWNSAMPLE\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for our sinc-like function\n if max_rate!=1: \n downsample_filter_response = signallib.firwin(2 * half_len + 1, f_c, window=('kaiser', beta))\n else: # just in case we aren't downsampling -- I think this should work? \n downsample_filter_response = zeros(2 * half_len + 1)\n downsample_filter_response[half_len + 1] = 1\n \n # Zero-pad our filter to put the output samples at the center\n # n_pre_pad = int((DOWNSAMPLE - half_len % DOWNSAMPLE))\n # n_post_pad = 0\n # n_pre_remove = (half_len + n_pre_pad) // DOWNSAMPLE\n # We should rarely need to do this given our filter lengths...\n # while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],\n # up, down) < n_out + n_pre_remove:\n # n_post_pad += 1\n # downsample_filter_response = np.concatenate((np.zeros(n_pre_pad), downsample_filter_response, np.zeros(n_post_pad)))\n \n downsample_filt_tensor = tf.constant(downsample_filter_response, tf.float32)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 0)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 2)\n downsample_filt_tensor = tf.expand_dims(downsample_filt_tensor, 3)\n\n return downsample_filt_tensor", "def importance_sampler(raw_data, analysis_settings):\n pass", "def restoreTrackThreshold(ants=0, subarray=DEFAULT) :\n subNo = subarrayNo\n if subarray == SCI2: subNo = 2\n toleranceMpName = \"Control.Subarray%d.trackTolerance\"%subNo\n tolerance = queryDouble(toleranceMpName, 24) # 24 retries (12 seconds)\n trackThreshold(tolerance, ants, subarray=subarray)", "def cal_samples(self):\n max_omega = max(\n abs(2 * np.pi * self.u.fundamental),\n abs(2 * np.pi * self.v.fundamental),\n abs(2 * np.pi * self.w.fundamental),\n )\n max_freq = max_omega / (2 * np.pi)\n self.fake_samples_number = (\n (max_freq ** 2) * 6 * self.u.data.shape[0] / self.u.sampling_rate\n )", "def thresholdInput(self,samples):\n self.__thresholdInput(samples)", "def resample(self):\n # propagate networks\n self.z = self.prior_latent_distribution.sample()\n # reconstruct image\n self.y_hat_raw = self.fcomb(self.unet_features, self.z)\n\n return self.y_hat_raw", "def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = 
pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))", "def 
find_best_attribute_threshold_entropy(original_training_data):\n\n ''' Initialize the values to horrible badness/infinity '''\n best_minimum_entropy = math.inf\n best_threshold = math.inf\n best_attribute = ''\n best_split_index = math.inf\n\n ''' Storing column names in a list '''\n column_name_list = original_training_data.columns.values.tolist()\n\n ''' Storing columns names in a list except the target attribute '''\n attribute_columns_list = column_name_list[:-1]\n\n ''' Converting dataframe values to a numpy array for faster access and calculations '''\n original_training_data_values = original_training_data.values\n\n ''' Iterating through the dataframe column by column. attribute column list is [Flour,Sugar,Oils,Proteins]'''\n for columnindex in range(0,len(attribute_columns_list)):\n\n ''' Storing all threshold values for each column in a separate dataframe. : means select all rows [:,columnindex] '''\n threshold_values = original_training_data_values[:,columnindex]\n\n ''' For every possible threshold we have to check if it gives us the best split and minimum entropy '''\n for threshold in threshold_values:\n ''' Checking if the value is within the range (0-10) '''\n if 0 < threshold <= 10:\n ''' Splitting the data according to the threshold value. less_than_threshold contains all records where the \n threshold value is less than the row value of that particular column. Similarly for more_than_threshold '''\n less_than_threshold = original_training_data[threshold_values <= threshold]\n more_than_threshold = original_training_data[threshold_values > threshold]\n\n ''' Calling the total weighted entropy by passing in two dataframes'''\n total_weighted_entropy = find_entropy(less_than_threshold,more_than_threshold)\n\n ''' Check if current entropy is less/better than best minimum entropy and if it is, then update the best minimum\n entropy to current entropy and store the split index, threshold, and attribute. '''\n if total_weighted_entropy < best_minimum_entropy:\n best_minimum_entropy = total_weighted_entropy\n best_threshold = threshold\n best_attribute = attribute_columns_list[columnindex]\n best_split_index = columnindex\n\n return best_split_index,best_attribute,best_threshold,best_minimum_entropy", "def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. 
librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0", "def fdc_flv(self, low_flow: float = 0.3) -> float:\n\n low_flow = 1.0 - low_flow\n # make sure that metric is calculated over the same dimension\n obs = self.true.flatten()\n sim = self.predicted.flatten()\n\n if (low_flow <= 0) or (low_flow >= 1):\n raise RuntimeError(\"l has to be in the range (0,1)\")\n\n # for numerical reasons change 0s to 1e-6\n sim[sim == 0] = 1e-6\n obs[obs == 0] = 1e-6\n\n # sort both in descending order\n obs = -np.sort(-obs)\n sim = -np.sort(-sim)\n\n # subset data to only top h flow values\n obs = obs[np.round(low_flow * len(obs)).astype(int):]\n sim = sim[np.round(low_flow * len(sim)).astype(int):]\n\n # transform values to log scale\n obs = np.log(obs + 1e-6)\n sim = np.log(sim + 1e-6)\n\n # calculate flv part by part\n qsl = np.sum(sim - sim.min())\n qol = np.sum(obs - obs.min())\n\n flv = -1 * (qsl - qol) / (qol + 1e-6)\n\n return float(flv * 100)", "def get_noise_thresholds(size_of_class=45, fakes='./data/CASIA1_fakes', originals='./data/CASIA1_originals', \n fakes_ela='./data/CASIA1_fakes_ela'):\n fakes_list = os.listdir(fakes)\n\n fakes = load_fakes(fakes_list, fakes, originals)\n\n noises = []\n for i, item in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, item.path.split('\\\\')[-1]))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n noises.append(estimate_noise(image))\n\n fakes = np.array(fakes)\n noises = np.array(noises)\n idxs = noises.argsort()\n sorted_by_noise = fakes[idxs]\n\n for i, item in enumerate(sorted(noises)):\n if (i+1) % size_of_class == 0:\n print(\"####\", i+1, item)\n else:\n print(i+1, item)", "def load_train_dataset(data_dir, word_list, silence_percentage, noise_percentage):\n validation_percentage, testing_percentage = 0.1, 0.1\n temp_list = []\n\n #wav_lists = os.path.join(data_dir, *, '*.wav')\n for word_l in word_list:\n #wav_word_list = os.path.join(data_dir, word_l)\n wav_list = os.path.join(data_dir, word_l, '*.wav')\n for file in gfile.Glob(wav_list):\n _, word = os.path.split(os.path.dirname(file))\n word = word.lower()\n\n if which_set(file, validation_percentage, testing_percentage) == 'training':\n rate, signal = load_wav(file);\n signal_and_noise = add_noise(signal, rate, 1, os.path.join(data_dir,'_background_noise_'), noise_percentage)\n \n feature = psf.mfcc(signal_and_noise, rate, nfilt = 40,numcep = 12, appendEnergy = False)\n #if feature.shape[0] 
!= 99:\n # print(str(len(signal)) + \" \" + str(rate))\n temp_list.append({'feature': feature, 'label': word_l})\n\n # hotspot\n #silence = len(X_train) * silence_percentage\n silence = int(math.ceil(len(temp_list) * silence_percentage / 100))\n for _ in range(silence):\n temp_list.append({'feature': 0, 'label': \"_silence_\"})\n\n random.shuffle(temp_list)\n\n X_train = np.zeros((len(temp_list), 99, 12))\n Y_train = np.zeros( len(temp_list) )\n\n for i in range(len(X_train)):\n X_train[i] = temp_list[i]['feature']\n Y_train[i] = word2index(temp_list[i]['label'])\n\n return X_train, Y_train", "def choose_to_stop_early(self):\n # return self.cumulated_num_tests > 10 # Limit to make 10 predictions\n # return np.random.rand() < self.early_stop_proba\n batch_size = 30 # See ingestion program: D_train.init(batch_size=30, repeat=True)\n num_examples = self.metadata_.size()\n num_epochs = self.cumulated_num_steps * batch_size / num_examples\n return num_epochs > self.num_epochs_we_want_to_train # Train for certain number of epochs then stop", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def track_foreground(self, diff_threshold=None, frames_avg=50,\n smooth_std=3):\n avg = self.get_average(frames_avg)\n self.track = []\n self.diffs = []\n for ind, layer in enumerate(self.layers):\n diff = abs(layer.load_image() - avg)\n diff = colors.rgb_to_hsv(diff)[..., 2]\n layer.image = None\n diff = gaussian_filter(diff, smooth_std)\n layer.diff = diff\n if diff_threshold is None:\n xs, ys = local_maxima(diff, disp=False, p=95)\n if len(xs) > 0:\n self.track += [(xs[0], ys[0])]\n else:\n self.track += [(np.nan, np.nan)]\n else:\n xs, ys = local_maxima(diff, disp=False,\n min_diff=diff_threshold)\n if len(xs) > 0:\n self.track += [(xs, ys)]\n else:\n self.track += [(np.nan, np.nan)]\n # self.diffs += [diff]\n # self.track += [(np.argmax(diff.mean(0)),\n # np.argmax(diff.mean(1)))]\n print_progress(ind, len(self.layers))", "def tail_cts_per_shot(datapath, lower, TPQI_starts, bin_size = 0.256, normalize = False, correct_for_bg = True, save = 1, pulses_in_sequence = 300):\n\n print 'analyzing tail counts per 
shot...' \n current_dir = os.getcwd()\n plt.close('all')\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n\n ch1_counts = data['hist_ch1']\n ch0_counts = data['hist_ch0']\n\n time = bin_size*arange(len(ch1_counts))\n \n if correct_for_bg:\n bg_level_ch1 = ch1_counts[int(0.75*len(ch1_counts)):int(0.90*len(ch1_counts))].mean()\n ch1_counts = ch1_counts - bg_level_ch1*ones(len(ch1_counts))\n bg_level_ch0 = ch0_counts[int(0.75*len(ch0_counts)):int(0.90*len(ch0_counts))].mean()\n ch0_counts = ch0_counts - bg_level_ch0*ones(len(ch0_counts))\n\n #print 'measured background level for [ch0,ch1] = ['+num2str(bg_level_ch0,1)+','+num2str(bg_level_ch1,1)+']'\n\n if normalize:\n ch1_counts_normalized = ch1_counts/ch1_counts.max()\n ch0_counts_normalized = ch0_counts/ch0_counts.max()\n \n upper = lower + 40.0\n\n tail_area_time = time[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch1 = ch1_counts[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch0 = ch0_counts[int(lower/bin_size):int(upper/bin_size)]\n\n tail_counts_per_shot = (tail_area_ch1.sum()+tail_area_ch0.sum())/float(TPQI_starts*pulses_in_sequence)\n\n figure1 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(211)\n if not normalize:\n plt.semilogy(time, ch1_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch1_counts_normalized, '-r')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch1')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n\n plt.subplot(212)\n if not normalize:\n plt.semilogy(time, ch0_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch0_counts_normalized, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch0')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n if save:\n figure1.savefig('tail_cts_per_shot.pdf')\n\n try:\n data.close()\n except:\n pass\n\n print 'tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4'\n\n return tail_counts_per_shot", "def loadtrainData_undersampling():\n train = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train.append([float(lineArr[i]) for i in range(len(lineArr))])\n\n pos = []\n neg = []\n for i in train:\n if i[-1] == 1.0:\n pos.append(i)\n else:\n neg.append(i)\n slice1 = random.sample(neg, len(pos))\n data = pos + slice1\n train_x = []\n train_y = []\n y = []\n for line in data:\n train_x.append([float(line[i]) for i in range(len(line) - 1)])\n y.append([int(line[-1])])\n for i in range(len(y)):\n train_y.append(y[i][0])\n return np.mat(train_x), np.mat(train_y).transpose()", "def finetune_depth():\n start_depth = 3\n tol = 10E-4\n best_depth = start_depth\n acc = [-1]\n 
for i in tqdm(range(20),desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n XGBCla = get_XGBmodel(depth=i+start_depth)\n XGBCla.fit(X_train, y_train)\n pred = XGBCla.predict(X_test)\n acc.append(accuracy_score(y_test, pred))\n if (abs(acc[i]-acc[i+1])<tol):\n break\n if (acc[i]<acc[i+1]):\n best_depth = start_depth + i\n print(\"Accuracy: %.4f\" % acc[-1])\n print(\"Best depth: %d\" % best_depth)", "def filter(self, newer_than=None):\r\n if newer_than is None:\r\n newer_than = self._clock.time() - self._window.as_(Time.SECONDS)\r\n self._samples = [sample for sample in self._samples if sample[0] >= newer_than]", "def _set_number_of_subsamples(self, number_of_subsamples):\n self._number_of_subsamples = number_of_subsamples\n self._compute_down_sample_factor()", "def resample(self):\n pass", "def ConstrFeature(upper_bound):\n #upper_bound = 20000\n steps = 0\n marked_samples = sample(edges, upper_bound)\n for src, dest in marked_samples:\n print \"positive steps: %f\" % (float(steps)/upper_bound)\n\n # construct features\n features = Features(src, dest)\n # class\n features.append(1)\n\n # add an instance to instance list\n instances.append(features)\n\n steps += 1\n\n # sample nodes\n dests_samples = sample(nodes, upper_bound)\n # sample 20% nodes whoes outdegrees is less than median value\n low_out_point = [choice(rank_less) for i in range(int(upper_bound*0.2))]\n # sample 80% nodes whoes outdegrees is greater than median value\n high_out_point = [choice(rank_more) for i in range(int(upper_bound*0.8))]\n\n # build instances with four features\n for num, val in enumerate(dests_samples):\n print \"Progress: \", num / float(upper_bound)\n\n # 20% nodes\n if (num < upper_bound * 0.2):\n x, y = low_out_point[num], val\n # 80% nodes\n else:\n x, y = high_out_point[num-int(upper_bound*0.2)],val\n\n features = Features(x, y)\n\n if x in out_edges.keys():\n if y in out_edges.get(x):\n features.append(1)\n else:\n features.append(0)\n else:\n features.append(0)\n\n instances.append(features)", "def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. 
I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]", "def getUnscaledSamples(self, **kwargs) -> TimeData:\n # initialise chans, startSample and endSample with the whole dataset\n options = self.parseGetDataKeywords(kwargs)\n\n # get the files to read and the samples to take from them, in the correct order\n dataFilesToRead, samplesToRead, scalings = self.getDataFilesForSamples(\n options[\"startSample\"], options[\"endSample\"]\n )\n numSamples = options[\"endSample\"] - options[\"startSample\"] + 1\n # set up the dictionary to hold the data\n data = {}\n for chan in options[\"chans\"]:\n data[chan] = np.zeros(shape=(numSamples), dtype=self.dtype)\n\n # loop through chans and get data\n sampleCounter = 0\n for dFile, sToRead, scalar in zip(dataFilesToRead, samplesToRead, scalings):\n # get samples - this is inclusive\n dSamples = sToRead[1] - sToRead[0] + 1\n # spam files always record 5 channels\n dSamplesRead = dSamples * self.recChannels[dFile]\n # read the data\n byteOff = (\n self.dataByteOffset[dFile]\n + sToRead[0] * self.recChannels[dFile] * self.dataByteSize\n )\n dFilePath = os.path.join(self.dataPath, dFile)\n dataRead = np.memmap(\n dFilePath,\n dtype=self.dtype,\n mode=\"r\",\n offset=byteOff,\n shape=(dSamplesRead),\n )\n # now need to unpack this\n for chan in options[\"chans\"]:\n # check to make sure channel exists\n self.checkChan(chan)\n # get the channel index - the chanIndex should give the right order in the data file\n # as it is the same order as in the header file\n chanIndex = self.chanMap[chan]\n # use the range sampleCounter -> sampleCounter + dSamples, because this actually means sampleCounter + dSamples - 1 as python ranges are not inclusive of the end value\n # scale by the lsb scalar here - note that these can be different for each file in the run\n data[chan][sampleCounter : sampleCounter + dSamples] = (\n dataRead[chanIndex : dSamplesRead : self.recChannels[dFile]]\n * scalar[chan]\n )\n # increment sample counter\n sampleCounter = sampleCounter + dSamples # get ready for the next data read\n\n # return data\n startTime, stopTime = self.sample2time(\n options[\"startSample\"], options[\"endSample\"]\n )\n comments = []\n comments.append(\n \"Unscaled data {} to {} read in from measurement {}, samples {} to {}\".format(\n startTime,\n stopTime,\n self.dataPath,\n options[\"startSample\"],\n options[\"endSample\"],\n )\n )\n comments.append(\"Data read from {} files in total\".format(len(dataFilesToRead)))\n comments.append(\n \"Data scaled to mV for all channels using scalings in header files\"\n )\n comments.append(\"Sampling frequency {}\".format(self.getSampleFreq()))\n return TimeData(\n sampleFreq=self.getSampleFreq(),\n startTime=startTime,\n stopTime=stopTime,\n data=data,\n comments=comments,\n )", "def subtrack_train(cur):\n assert cur[0].state_size >= cur[0].subtrack_maxlen, \"Track 
length is longer than input size\"\n op = cur[0]\n op.image_folder = 'image0'\n print(\"Round 0\")\n run_track(op)\n\n op = cur[1]\n for env in range(op.n_env - 1):\n op.image_folder = f'image{env+1}'\n print(f\"\\nRound {env+1}\")\n run_track(op)\n op.subtrack = False\n op.epoch = 50\n op.image_folder = 'imagefull'\n run_track(op)", "def _compute_cutoffs(self):\n self._cutoffidx=np.zeros(self.nsamples,dtype=np.int)\n # Find the inlfection point\n # TODO: check robustness of this method against fluctuations in the data\n self.samplesdatadiff=np.diff(self.samplesdata,axis=0)\n flex=np.argmax(self.samplesdatadiff,axis=0)\n # if the detected cycles is the last one, then the flex has not yet been reached, warn.\n for i,f in enumerate(flex):\n #self._message(\"(%s) Preanalysis - detection of inflection point.\"%(self.samples[i])) \n if f==(self.nvalues-1):\n self._cutoffidx[i]=f\n self._message(\"Warning: (%s) Inflection point not detected. Using all fluorescent values available (%d cycles).\"%(self.samples[i],f)) \n elif f<10:\n self._message(\"Warning: (%s) Early inflection point (cycle %d).\"%(self.samples[i],f))\n else: \n self._cutoffidx[i]=np.minimum(f+2,self.nvalues)\n #self._message(\"(%s) Inflection point found at cycle %d).\"%(self.samples[i],f)) ", "def _remove_flux_extinction(self):\n self.fluxUnred = self.flux.copy()\n self.fluxErrUnred = self.fluxErr.copy()\n self.fluxRenorm = self.flux.copy()\n self.fluxErrRenorm = self.fluxErr.copy()\n\n # Using negative a_v so that extinction.apply works in reverse and removes the extinction\n if self.mwebv:\n extinctions = extinction.fitzpatrick99(wave=self._good_filter_wave, \\\n a_v=-3.1 * self.mwebv, r_v=3.1, unit='aa')\n\n for i, pb in enumerate(self._good_filters):\n mask = (self.passband == pb)\n\n flux_pb = self.flux[mask]\n fluxerr_pb = self.fluxErr[mask]\n npbobs = len(flux_pb)\n\n if npbobs < 1:\n return\n\n if self.mwebv:\n flux_out = extinction.apply(extinctions[i], flux_pb, inplace=False)\n fluxerr_out = extinction.apply(extinctions[i], fluxerr_pb, inplace=False)\n else:\n flux_out = flux_pb\n fluxerr_out = fluxerr_pb\n self.fluxUnred[mask] = flux_out\n self.fluxErrUnred[mask] = fluxerr_out\n\n if npbobs > 1:\n # there's at least enough observations to find minimum and maximum\n minfluxpb = flux_out.min()\n maxfluxpb = flux_out.max()\n norm = maxfluxpb - minfluxpb\n self.fluxRenorm[mask] = (flux_out - minfluxpb) / norm\n self.fluxErrRenorm[mask] = fluxerr_out / norm\n elif npbobs == 1:\n # deal with the case with one observation in this passband by setting renorm = 0.5\n norm = self.fluxUnred[mask] / 0.5\n self.fluxRenorm[mask] /= norm\n self.fluxErrRenorm[mask] /= norm\n\n self._default_cols = ['time', 'flux', 'fluxErr', 'fluxUnred', 'fluxErrUnred', \\\n 'fluxRenorm', 'fluxErrRenorm', 'photflag', 'zeropoint', 'obsId']\n return", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 
'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def prepare_data_high(df, target_col, window_len=30, zero_base=True, test_size=0.2):\n # train test split\n train_data, test_data, train_target, test_target = train_test_split(df, test_size=test_size)\n \n # extract window data\n X_train = extract_window_data(train_data, window_len, zero_base)\n X_test = extract_window_data(test_data, window_len, zero_base)\n \n # extract targets\n #train_target, test_target = train_test_split(df.iloc[1:], test_size=test_size)\n y_train = train_target[target_col][window_len:].values\n y_test = test_target[target_col][window_len:].values\n if zero_base:\n y_train = y_train / train_target[target_col][:-window_len].values - 1\n y_test = y_test / test_target[target_col][:-window_len].values - 1\n\n return train_data, test_data, train_target, test_target, X_train, X_test, y_train, y_test", "def downsample_data(\n downsampled_frac,\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n):\n\n np.random.seed(2023)\n tmp_tr_id = np.random.choice(\n len(fore_train_op),\n int(len(fore_train_op) * downsampled_frac),\n replace=False,\n )\n np.random.seed(2023)\n tmp_val_id = np.random.choice(\n len(fore_valid_op),\n int(len(fore_valid_op) * downsampled_frac),\n replace=False,\n )\n\n fore_train_ip = [x[tmp_tr_id] for x in fore_train_ip]\n fore_train_op = fore_train_op[tmp_tr_id]\n fore_valid_ip = [x[tmp_val_id] for x in fore_valid_ip]\n fore_valid_op = fore_valid_op[tmp_val_id]\n\n np.random.seed(2023)\n tmp_tr_id = np.random.choice(\n len(train_op), int(len(train_op) * downsampled_frac), replace=False\n )\n np.random.seed(2023)\n tmp_val_id = np.random.choice(\n len(valid_op), int(len(valid_op) * downsampled_frac), replace=False\n )\n np.random.seed(2023)\n tmp_test_id = np.random.choice(\n len(test_op), int(len(test_op) * downsampled_frac), replace=False\n )\n\n train_ip = [x[tmp_tr_id] for x in train_ip]\n train_op = train_op[tmp_tr_id]\n valid_ip = [x[tmp_val_id] for x in valid_ip]\n 
valid_op = valid_op[tmp_val_id]\n test_ip = [x[tmp_test_id] for x in test_ip]\n test_op = test_op[tmp_test_id]\n\n return (\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n )", "def hand_samples(data,lfs,rfs,start_frame,cap,show=False):\n data['lhkpss'] = hand.translate_hand_kps(data['lhkpss'],data['kpss'],7);\n data['rhkpss'] = hand.translate_hand_kps(data['rhkpss'],data['kpss'],4);\n row1 = [];\n row2 = [];\n for f in lfs:\n cap.set(cv2.CAP_PROP_POS_FRAMES,start_frame+f);\n _,frame = cap.read();\n data['crop'] = ut.crop(frame,256,320);\n data['i'] = f;\n row1.append(draw_lh_lines(data)[105:-65,105:-65]);\n for f in rfs:\n cap.set(cv2.CAP_PROP_POS_FRAMES,start_frame+f);\n _,frame = cap.read();\n data['crop'] = ut.crop(frame,256,320);\n data['i'] = f;\n row2.append(draw_rh_lines(data)[65:-105,65:-105]);\n row1 = np.concatenate(row1,axis=1);\n row2 = np.concatenate(row2,axis=1);\n grid = np.concatenate([row1,row2]);\n if show:\n ut.show(grid);\n return grid;", "def _get_normalized_flow_countrywide(x_sample):\n global win; win /= 3\n global nebr; nebr = 7 # nebr /= 3\n global norm_min; norm_min = norm_min * 1. / 3\n global MIN_FLOW_NORM; MIN_FLOW_NORM = MIN_FLOW_NORM * 1. / 3\n global MIN_MOVE_PIXEL; MIN_MOVE_PIXEL /= (6*6)\n \n prev_frame = norm_trans(x_sample[-2])\n next_frame = norm_trans(x_sample[-1])\n kernel_shape = (79, 79) # (477/6, 477/6)\n flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame, 0.5,3,win, 3, nebr, nebr/4, cv2.OPTFLOW_FARNEBACK_GAUSSIAN)\n \n # flow_norm = numpy.linalg.norm(flow, axis=2) # for numpy version >= 1.8\n flow_norm = np.sum(flow**2, axis=2)**(1./2) # for numpy version < 1.8\n \n kernel = np.ones(kernel_shape, np.float32)\n\n# num_moved_flows = numpy.sum(flow_norm>norm_min)\n num_moved_flows = cv2.filter2D((flow_norm>norm_min).astype('float32'), -1, kernel, borderType=cv2.BORDER_REPLICATE)\n\n# if num_moved_flows > MIN_MOVE_PIXEL:\n# flow_fliter = numpy.zeros(shape=flow.shape);\n# flow_fliter[:,:,0] = flow[:,:,0] * (flow_norm > norm_min)\n# flow_fliter[:,:,1] = flow[:,:,1] * (flow_norm > norm_min)\n# \n# flow_mean = numpy.sum(flow_fliter, axis=(0,1)) / num_moved_flows\n# else:\n# flow_mean = numpy.array([0,0])\n \n flow_filter = flow * (flow_norm > norm_min)[:, :, np.newaxis]\n flow_mean = np.zeros_like(flow)\n flow_mean[:,:,0] = cv2.filter2D(flow_filter[:,:,0], -1, kernel, borderType=cv2.BORDER_REPLICATE) / (num_moved_flows + 0.00001)\n flow_mean[:,:,1] = cv2.filter2D(flow_filter[:,:,1], -1, kernel, borderType=cv2.BORDER_REPLICATE) / (num_moved_flows + 0.00001)\n flow_mean = flow_mean * (num_moved_flows > MIN_MOVE_PIXEL)[:, :, np.newaxis]\n\n# flow_mean_norm = np.sum(flow_mean**2)**(1./2)\n# if flow_mean_norm > MIN_FLOW_NORM:\n# flow_norm = flow_norm.reshape((flow_norm.shape[0], flow_norm.shape[1], 1)) \n# flow = flow * (flow_norm < MIN_FLOW_NORM) * flow_mean_norm / flow_norm + flow * (flow_norm >= MIN_FLOW_NORM)\n flow_mean_norm = np.sum(flow_mean**2, axis=2)**(1./2)\n flow = flow * ((flow_norm < MIN_FLOW_NORM) * (flow_mean_norm > MIN_FLOW_NORM) * flow_mean_norm / (flow_norm + 0.000001))[:, :, np.newaxis] + \\\n flow * ((flow_norm >= MIN_FLOW_NORM) | (flow_mean_norm <= MIN_FLOW_NORM))[:, :, np.newaxis] \n return flow", "def get_noise_frames(samples, sampling_rate, window_width=2048, stepsize=512, verbose=False):\n\t\n\t# Separate the samples in frames according to the window_width and stepsize\n\tnr_of_frames, frames = get_frames(samples, 
window_width=window_width, stepsize=stepsize)\n\t\n\t# Use a window function (hamming works best) on all our frames\n\tframes = window_function_transform(frames)\n\t\n\t# Get the statistical features that we need. For now only 'energy' works.\n\tenergies, mean_energy = get_statistcal_features( frames )\n\t\n\t# Get the energy coefficient that we need for separating pure noise from non-pure noise.\n\tSNR, energy_coefficient = compute_energy_coefficient(samples, base_coefficient=2)\n\t\n\tif verbose:\n\t\tprint(\"Energy coefficient: \" + str(round(energy_coefficient, 3) ) )\n\t\tprint(\"Signal-to-Noise: \" + str(round(SNR, 3)))\n\t\n\t\"\"\" Separating pure noise from non-pure noise. \"\"\"\n\t\n\t# Initiate lists to store the separated frames in.\n\tnoisy_frames = []\n\tnon_noisy_frames = []\n\tnoisy_energy = []\n\tnon_noisy_energy = []\n\t\n\t# Go through all of the frame-energies. The ones below a certain threshold have a very high chance of being pure background noise.\n\tfor index, energy in enumerate(energies):\n\t\t\n\t\tif energy < energy_coefficient * mean_energy:\n\t\t\t\n\t\t\t# Add the pure noisy parts to the appropriate list\n\t\t\tnoisy_frames.extend(frames[index][int((window_width-stepsize)/2):int((window_width+stepsize)/2)])\n\t\t\tnoisy_energy.append(energy)\n\t\t\n\t\telse:\n\t\t\t# Add the non-noise frames to the appropriate list\n\t\t\tnon_noisy_frames.extend(frames[index][int((window_width-stepsize)/2):int((window_width+stepsize)/2)])\n\t\t\tnon_noisy_energy.append(energy)\n\t\n\tif verbose:\n\t\t\n\t\t# A measure for how well the noise is predictable (higher is better). The better predictable it is, the better a spectral noise gate will work\n\t\tprint(\"Noise predictability: \" + str(round(autocorr(noisy_frames)[0,1] / autocorr(non_noisy_frames)[0,1], 3) ) )\n\t\n\t\t\"\"\" Plotting \"\"\"\n\t\t\n\t\t# Initiate time domain axes for some different graphs\n\t\tt_soundwave = np.linspace(0, len(samples)/sampling_rate, len(samples))\n\t\tt_soundwave_noisy = np.linspace(0, len(noisy_frames)/sampling_rate, len(noisy_frames))\n\t\tt_soundwave_non_noisy = np.linspace(0, len(non_noisy_frames)/sampling_rate, len(non_noisy_frames))\n\t\t\n\t\tt_windowed_features = np.linspace(0, len(samples)/sampling_rate, nr_of_frames)\n\t\tt_windowed_features_noisy = np.linspace(0, len(noisy_frames)/sampling_rate, len(noisy_energy))\n\t\tt_windowed_features_non_noisy = np.linspace(0, len(non_noisy_frames)/sampling_rate, len(non_noisy_energy))\n\t\t\n\t\t# Plot the signal versus the signal energy\n\t\tplt.figure(figsize=(20,12))\n\t\tplt.title(\"Energy whole signal\")\n\t\tplt.plot(t_soundwave, preprocessing.normalize(samples), alpha=0.5)\n\t\tplt.plot(t_windowed_features, preprocessing.normalize(energies))\n\t\tplt.show()\n\t\t\n\t\t# Plot the signal versus the signal energy\n\t\tplt.figure(figsize=(20,12))\n\t\tplt.title(\"Energy pure noise signal\")\n\t\tplt.plot(t_soundwave_noisy, preprocessing.normalize(noisy_frames), alpha=0.5)\n\t\tplt.plot(t_windowed_features_noisy, preprocessing.normalize(noisy_energy) )\n\t\tplt.show()\n\t\t\n\t\t# Plot the signal versus the signal energy\n\t\tplt.figure(figsize=(20,12))\n\t\tplt.title(\"Energy non pure noise signal\")\n\t\tplt.plot(t_soundwave_non_noisy, preprocessing.normalize(non_noisy_frames), alpha=0.5)\n\t\tplt.plot(t_windowed_features_non_noisy, preprocessing.normalize(non_noisy_energy))\n\t\tplt.show()\n\t\n\treturn np.array(noisy_frames)", "def get_all_trends(self, verbose=False):\n self.labels = []\n inferred_lines = []\n \n # crop, resize, 
color quantize\n self.crop_to_gridline()\n self.crop_title()\n self.set_size()\n self.color_quantization()\n \n if verbose:\n print(\"Original Image\")\n display(self.orig_image)\n print(\"Cropped and color quantized:\")\n self.display()\n \n # separate colors into color pixel, images, and pixels that belong to it\n img_and_pix = self.separate_colors()\n colors, images, pixels = zip(*img_and_pix)\n \n if verbose:\n print(\"LEN IMGS\", len(images), \"; SHOULD BE\", len(self.separate_colors()))\n \n for i, image in enumerate(images):\n \n # separate into x and y pixels\n pix = pixels[i]\n inferred_lines.append(img_and_pix[i])\n x,y = zip(*np.array(pix))\n \n if verbose:\n print('len(set(y))>20', len(set(y)))\n print('len(set(x))>20', len(set(x)))\n print('len(pix)<= {}; actual: {}'.format(self.background_threshold*np.prod(self.size), len(pix)))\n display(image)\n \n # If pixels don't fluctuate more than pixel_diff_threshold, \n # or size of color is greater than background_threshold..\n if len(pix) <= self.background_threshold*np.prod(self.size):\n if len(set(y))>self.pixel_diff_threshold \\\n and len(set(x))>self.pixel_diff_threshold:\n \n # take difference between pixels\n d = np.diff(pix, axis=0)\n segdists = np.sqrt((d ** 2).sum(axis=1))\n\n # Check if one line alone in the bitmap spans min_grid_length.\n # np.argmax(np.bincount(x)) -> keep x constant\n pot_vert_line = [j for i, j in pix if i == np.argmax(np.bincount(x))]\n pot_hor_line = [i for i, j in pix if j == np.argmax(np.bincount(y))]\n \n if verbose:\n print(\"Passed 0.25 pixel threshold and straight line threshold\")\n print(\"sum(segdists)/len(segdists)<7\", sum(segdists)/len(segdists))\n print(\"Y LINE \", pot_vert_line[0], pot_vert_line[-1], self.height*self.min_grid_span)\n print(\"X LINE\" , pot_hor_line[0], pot_hor_line[-1], self.width*self.min_grid_span)\n\n if sum(segdists)/len(segdists)<self.min_pixel_dist and \\\n len(pot_vert_line)<(self.height*self.min_grid_span) and \\\n len(pot_hor_line)<(self.width*self.min_grid_span):\n\n\n# inferred_lines.append(img_and_pix[i])\n # display(image)\n\n actual_y = self.height - np.array(pix)[:,1]\n \n df = pd.DataFrame(pix).groupby(0).agg(np.mean)\n xvals = df.index\n yvals = df.values.flatten().astype(int)\n slope, intercept, rvalue, pvalue, stderr = scp.stats.linregress(xvals, yvals)\n \n change = slope*len(yvals)\n if pvalue > 0.05 or abs(change) < 0.01:\n self.add_label(\"NO TREND\")\n else:\n if slope < 0:\n self.add_label(\"DECREASING\")\n else:\n self.add_label(\"INCREASING\") \n\n if verbose: \n print(\"-\"*50)\n if not self.labels:\n self.add_label(\"NO TREND\")\n return inferred_lines, self.labels", "def getThreshDB():\n calDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n pars = db.Query()\n bkg = dsi.BkgInfo()\n\n # loop over datasets\n # for ds in [0,1,2,3,4,5,6]:\n for ds in [6]:\n dsNum = ds if isinstance(ds, int) else 5\n goodChans = det.getGoodChanList(dsNum)\n\n for bkgIdx in bkg.getRanges(ds):\n\n # ==== loop over sub-ranges (when TF was run) ====\n rFirst, rLast = bkg.getRanges(ds)[bkgIdx][0], bkg.getRanges(ds)[bkgIdx][-1]\n\n subRanges = bkg.GetSubRanges(ds,bkgIdx)\n if len(subRanges) == 0: subRanges.append((rFirst, rLast))\n\n for subIdx, (runLo, runHi) in enumerate(subRanges):\n\n key = \"thresh_ds%d_bkg%d_sub%d\" % (dsNum, bkgIdx, subIdx)\n\n thD = dsi.getDBRecord(key, False, calDB, pars)\n print(key)\n for ch in thD:\n print(ch,\":\",thD[ch])\n print(\"\")", "def fine_tune(self, duration = 2):\n\n with sr.Microphone() as source:\n 
self.recorder.adjust_for_ambient_noise(source, duration=duration)\n return self.recorder.energy_threshold", "def __init__(self):\n super(LogSTFTMagnitudeLoss, self).__init__()", "def resample_unlab(pseudo, orig_dist, conf, limit=0):\n added = {}\n total_added = 0\n \n if limit is 0:\n limit = int(np.max(orig_dist))\n limit_set_by = conf[\"class_names\"][np.argmax(orig_dist)]\n else:\n limit_set_by = 'user'\n \n if conf[\"verbosity\"]:\n print ('Limit set by {} with {} samples'.format(limit_set_by, limit))\n print (\"-\"*50)\n\n new_findings = ([], [])\n new_findings_filepaths = []\n lab_arr = np.asarray(pseudo[\"lab_list\"], dtype=np.uint8)\n\n for class_idx in range(conf[\"num_classes\"]):\n # how many samples already in this class\n in_count = orig_dist[class_idx]\n\n indexes = np.where(lab_arr==class_idx)[0]\n num_new_findings = len(indexes)\n\n count = 0\n for count, idx in enumerate(indexes, start=1):\n if in_count >= limit:\n count -= 1 # reduce by one cuz of enumerate updates index early\n break\n fn = pseudo[\"name_list\"][idx]\n img = fn2img(fn, conf[\"unlab_dir\"], conf[\"img_shape\"][0])\n \n new_findings[0].append(img) # image\n new_findings[1].append(pseudo[\"lab_list\"][idx]) # label\n new_findings_filepaths.append(pseudo[\"name_list\"][idx]) # filepath\n in_count += 1\n \n total_added += count\n if conf[\"verbosity\"]:\n print (\"{:27}: added {}/{} samples\".format(conf[\"class_names\"][class_idx], count, num_new_findings))\n \n added[conf[\"class_names\"][class_idx]] = [count, num_new_findings]\n write_to_file(added, conf, 'samples_added_to_train')\n \n if conf[\"verbosity\"]:\n print (\"-\"*50)\n text = \"Added a total of {} samples to the training dataset. New dataset size is {}.\"\n print (text.format(total_added, conf[\"ds_sizes\"][\"train\"] + total_added))\n \n return new_findings, new_findings_filepaths", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def 
get_dft_train():\n\n single_dft = np.array(generate_doublet_train(ipi_to_frequency(155), num_pulses=6, pulse_duration=0.6))\n all_dft = []\n for i in range(150):\n dft = np.copy(single_dft)\n if all_dft:\n dft += all_dft[-1] + 500\n all_dft.extend(dft)\n\n return np.array(all_dft)" ]
[ "0.61294377", "0.58991325", "0.5723101", "0.56286037", "0.557679", "0.5432801", "0.5261728", "0.52388954", "0.5201363", "0.5196606", "0.51883674", "0.5182593", "0.51802176", "0.51758677", "0.5150192", "0.5148113", "0.5143918", "0.51152796", "0.51152796", "0.51152796", "0.5087558", "0.5086504", "0.50844115", "0.50748616", "0.5074183", "0.50678855", "0.50520724", "0.5051874", "0.5050721", "0.5039316", "0.5035039", "0.5033357", "0.5031227", "0.5030841", "0.50182533", "0.5017504", "0.5014362", "0.50137013", "0.5006106", "0.50003684", "0.4988275", "0.49865025", "0.49817735", "0.49586383", "0.49538302", "0.4942818", "0.49355462", "0.49222103", "0.49152207", "0.4912438", "0.48950753", "0.48950753", "0.48950753", "0.48887584", "0.48820713", "0.4881581", "0.4870061", "0.4870061", "0.48653045", "0.48606512", "0.4855927", "0.48544216", "0.48523903", "0.48504287", "0.4842896", "0.48418972", "0.48413685", "0.4835625", "0.48349085", "0.48296335", "0.48295918", "0.48294708", "0.4828433", "0.48270497", "0.48246655", "0.48204768", "0.48186374", "0.48094714", "0.48087195", "0.48085368", "0.48077857", "0.47982517", "0.4796832", "0.47945926", "0.47930032", "0.47915614", "0.47913086", "0.479029", "0.47880232", "0.47826862", "0.47822505", "0.47808748", "0.47782952", "0.4777887", "0.4776663", "0.47744623", "0.4769688", "0.4759416", "0.47584578", "0.47574306" ]
0.66418946
0
Generates a balanced set of training examples from a single dataset.
def get_examples(ds_data, network, parents, verbose=1, **params):
    # Parameters
    classes = params.setdefault('classes', [-1,0,1])
    target = params.setdefault('target', int(1.2e6))
    slice_len = params.setdefault('slice_len', 330)

    assert not target % len(classes)

    G = np.mean(ds_data, axis=0)
    examples = np.zeros((target, 5, slice_len, 1))
    labels = np.zeros((target, len(classes)))
    count = 0

    if verbose > 0:
        print('Generating {} training examples'.format(target))
        bar = pb.ProgressBar(max_value=target, widgets=[pb.Percentage(), ' - ', pb.Bar(), ' - ', pb.ETA()])

    for c in classes:
        pairs = np.argwhere(network == c)
        reps = int(target/len(classes)/pairs.shape[0]) + 1
        pair_idx = np.repeat(np.arange(pairs.shape[0]), reps)
        pair_idx = np.random.permutation(pair_idx)[:target//len(classes)]
        start_idx = np.random.randint(
            0, ds_data.shape[1]-slice_len, size=target//len(classes))

        for i in range(pair_idx.size):
            n1 = pairs[pair_idx[i]][0]
            n2 = pairs[pair_idx[i]][1]
            assert(network[n1,n2] == c)
            start = start_idx[i]
            end = start + slice_len
            p1 = np.mean(ds_data[parents[n1], start:end], axis=0)
            p2 = np.mean(ds_data[parents[n2], start:end], axis=0)
            examples[count,:,:,0] = np.vstack((
                p1, ds_data[n1][start:end], G[start:end],
                ds_data[n2][start:end], p2
            ))
            labels[count,:] = np.equal(classes, c, dtype=np.int32)
            if verbose > 0:
                bar.update(count)
            count +=1

    if verbose > 0:
        bar.finish()
        print(
            'Generated examples of shape:', examples.shape,
            '\nGenerated labels of shape:', labels.shape,
            '\nThere are {} classes: {}'.format(len(classes), classes)
        )

    assert not np.isnan(examples).any()

    return examples, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def inputs_balanced(batch_size, fake_data=False, one_hot=False, dtype=tf.float32, eval_data=False):\n class DataSets(object):\n pass\n data_sets = DataSets()\n if fake_data:\n def fake():\n return DataSetBalanced([], [], batch_size, fake_data=True, one_hot=one_hot, dtype=dtype, eval_data=eval_data)\n data_sets.train = fake()\n data_sets.validation = fake()\n data_sets.test = fake()\n return data_sets\n\n #testing = dict()\n validation = dict()\n training = dict()\n validation_labels = dict()\n #testing_labels = dict()\n training_labels = dict()\n if USE_MULTIPLE_FILES:\n validation, validation_labels = create_data_set(VALIDATION_FILE_LOCATION, eval_data)\n if not eval_data:\n training, training_labels = create_data_set(FILE_LOCATION, eval_data)\n #### HACK: I needed to do this so there would be some strange eosinophil in the validation data ####\n validation['strange_eosinophils'] = training['strange_eosinophils'][0:10]\n validation_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][0:10]\n training['strange_eosinophils'] = training['strange_eosinophils'][10:]\n training_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][10:]\n else:\n VALIDATION_SIZE = 20\n #TESTING_SIZE = 1\n data_examples = np.load(os.path.join(DATA_LOCATION, FILE_LOCATION))\n for name in cell_names:\n print(\"data_examples\")\n print(name+\":\"+str(data_examples[name].shape[0]))\n for i, name in enumerate(cell_names):\n if not eval_data:\n # make the random data consistent across runs\n np.random.seed(1)\n # Shuffle the data\n perm = np.arange(data_examples[name].shape[0])\n np.random.shuffle(perm)\n randomized_data = data_examples[name][perm]\n else:\n randomized_data = data_examples[name]\n validation[name] = randomized_data[:VALIDATION_SIZE]\n #testing[name] = randomized_data[VALIDATION_SIZE:VALIDATION_SIZE+TESTING_SIZE]\n if not eval_data:\n training[name] = randomized_data[VALIDATION_SIZE:]\n #training[name] = randomized_data[VALIDATION_SIZE+TESTING_SIZE:]\n training_labels[name] = to_categorical(np.full((training[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n validation_labels[name] = to_categorical(np.full((validation[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n #testing_labels[name] = to_categorical(np.full((testing[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n\n data_sets.validation = DataSetBalanced(validation, validation_labels, batch_size, fake_data=False, one_hot=True,\n dtype=tf.uint8, eval_data=eval_data)\n #data_sets.testing = 
DataSetBalanced(testing, testing_labels, batch_size, fake_data=False, one_hot=True, dtype=tf.uint8, eval_data=eval_data)\n if not eval_data:\n data_sets.train = DataSetBalanced(training, training_labels, batch_size, fake_data=False, one_hot=True,\n dtype=tf.uint8, eval_data=eval_data)\n\n return data_sets", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def generate_dataset(\n datasets, networks, parents, mode='train', mean=None, \n verbose=1, **params):\n # Parameters\n classes = params.setdefault('classes', [-1,0,1])\n data_type = params.setdefault('data_type', 'spikes')\n thres = params.setdefault('thres', 150.0)\n target = params.setdefault('target', int(1.2e6))\n valid_split = params.setdefault('valid_split', 0.1)\n slice_len = params.setdefault('slice_len', 330)\n \n assert len(datasets) == len(networks) == len(parents)\n examples = np.zeros((target, 5, slice_len, 1))\n labels = np.zeros((target, len(classes)))\n ex_per_netw = target//len(datasets)\n params['target'] = ex_per_netw\n \n for i in range(len(datasets)):\n \n if verbose > 0:\n print('Network {} of {}'.format(i+1, len(datasets)))\n \n data = datasets[i]\n network = networks[i]\n parents_ = parents[i]\n \n if data_type == 'spikes':\n ds_data = downsample_spikes(data, thres=thres, verbose=verbose)\n elif data_type == 'fluorescence':\n ds_data = downsample_fluorescence(\n data, thres=thres, verbose=verbose)\n else:\n raise ValueError('Invalid data type')\n \n start = i*ex_per_netw\n end = (i+1)*ex_per_netw\n examples[start:end], labels[start:end] = get_examples(\n ds_data, network, parents_, verbose=verbose, **params)\n \n shuffle_idx = np.random.permutation(np.arange(examples.shape[0]))\n examples = examples[shuffle_idx]\n labels = labels[shuffle_idx]\n \n if mode == 'train':\n \n idx = int(examples.shape[0]*valid_split)\n ex_valid, ex_train = np.split(examples, [idx], axis=0)\n lbl_valid, lbl_train = np.split(labels, [idx], axis=0)\n \n mean = np.mean(ex_train, axis=0)\n ex_train -= mean\n ex_valid -= mean\n \n return ex_train, ex_valid, lbl_train, lbl_valid, mean\n \n elif mode == 'test':\n \n assert mean != None\n examples -= mean\n \n return examples, labels\n \n else:\n raise ValueError('Invalid mode')", "def generate_data(self,seed):\n X, y = make_classification( n_samples = 250, random_state = seed )\n # Add bias term\n X = np.concatenate( ( np.ones( ( 250, 1 ) ), X ), axis = 1 )\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( \n X, y, test_size = 50, random_state = seed )", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n 
test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\n files = self.source_data_files(data_dir, tmp_dir, dataset_split)\n vocab = _extract_vocab_data(files)\n\n # Determine the number of instances to generate\n if dataset_split == problem.DatasetSplit.TRAIN:\n num_instances = self.num_train_instances\n else:\n num_instances = self.num_eval_instances\n\n for _ in range(num_instances):\n instance_size = random.randint(self.min_size, self.max_size)\n tokens = random.choices(vocab, k=instance_size)\n instance = ''.join(tokens)\n yield {'inputs': instance, 'targets': instance}", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def get_train_batches(data_dir='/home/yunhan/batchified'):\n # todo: read in data that is preoprocessed\n # Use batch 1 - 52 as train (60%), 53 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def get_test_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def _generate_data(self, x_data, y_data, max_seq_len, digits, seq_len,\n n_samples, use_one_hot, class_partition,\n upsample_control):\n # modify seq_len in case we do upsampling control\n if upsample_control:\n upsample_factor = seq_len\n seq_len = 1\n if not self.two_class:\n raise NotImplementedError()\n\n # construct all possible classes\n classes = [\"\".join(seq) for seq in \\\n itertools.product(\"01\", repeat=seq_len)]\n\n # get the right number of samples per class to get a balanced data set\n # with the desired n_samples.\n num = n_samples\n div = len(classes)\n n_samples_per_class = [num // div + (1 if x < num % div else 0) \\\n for x in range (div)]\n\n # find indices of samples with the wanted digit class\n y_data = [np.argmax(y) for y in y_data]\n digit_idx = []\n digit_idx.append(np.where(np.asarray(y_data) == digits[0])[0])\n digit_idx.append(np.where(np.asarray(y_data) == digits[1])[0])\n\n # generate samples for every class\n samples = []\n labels = []\n for i,c in enumerate(classes):\n this_label = i\n digits_to_sample = [int(c[i]) for i in range(len(c))]\n for s in range(n_samples_per_class[i]):\n this_sample = None\n for d in digits_to_sample:\n rand_idx = self._rstate.randint(len(digit_idx[d]))\n sample_idx = digit_idx[d][rand_idx]\n digit_sample = x_data[sample_idx]\n if this_sample is None:\n this_sample = digit_sample\n else:\n this_sample = np.vstack((this_sample,digit_sample)) \n samples.append(this_sample)\n labels.append(this_label)\n\n # if configured sort labels into 2 classes\n labels = np.asarray(labels)\n if self.two_class and not upsample_control:\n lbl_mask = np.isin(labels, class_partition)\n labels[~lbl_mask] = 0\n labels[lbl_mask] = 1\n\n if upsample_control:\n for i,s in enumerate(samples):\n # Initial timestep is absolute start position of digit. 
To\n # translate to a higher resolution image, we can just multiply\n # the abolute position vby the scaling factor.\n upsample = s[0,:]*upsample_factor\n for t in np.arange(1,s.shape[0]):\n # don't do upsampling at end of strokes or end of digits\n if all((s[t,2] == 0, s[t,3] == 0)):\n # Repeat original stroke \"upsample_factor\" times, such\n # that the relative stroke length is identical if\n # images are normalized to same resolution.\n for k in range(upsample_factor):\n upsample = np.vstack((upsample, s[t,:]))\n else:\n upsample = np.vstack((upsample, s[t,:]))\n samples[i] = upsample\n\n # structure output data\n out_data = labels.reshape(-1, 1)\n if use_one_hot:\n n_classes = 2**seq_len\n if self.two_class:\n n_classes = 2\n\n # FIXME We shouldn't call this method if the validation set size is\n # zero.\n if out_data.size == 0:\n out_data = np.matlib.repmat(out_data, 1, n_classes)\n else:\n # FIXME use internal method `_to_one_hot` and set required class\n # attributes beforehand.\n one_hot_encoder = OneHotEncoder(categories=[range(n_classes)])\n one_hot_encoder.fit(npm.repmat(np.arange(n_classes), 1, 1).T)\n out_data = one_hot_encoder.transform(out_data).toarray()\n\n if self.target_per_timestep:\n out_data = np.matlib.repmat(np.asarray(out_data), 1, max_seq_len)\n\n # structure input data\n in_data = np.zeros((n_samples,max_seq_len,4))\n sample_lengths = np.zeros(n_samples)\n for i,s in enumerate(samples):\n in_data[i,:s.shape[0],:] = s\n sample_lengths[i] = s.shape[0]\n\n in_data = self._flatten_array(in_data)\n\n return in_data, out_data, sample_lengths", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def _sample_mini_dataset(dataset, num_classes, num_shots):\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n for sample in class_obj.sample(num_shots):\n yield (sample, class_idx)", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def create_imbalanced_seed(data, num_classes, seed_size, prop, label):\n labelled_pool_idx = []\n unlabelled_pool_idx = [i for i in range(len(data))]\n label_weights = [1 if x != label else prop for x in range(num_classes)]\n total_weight = sum(label_weights)\n # these are the number of labelled examples of each class we would like to include.\n # these are floats as we can exceed some classes by 1 to get desired seed size\n desired_seed_label_count = [x*seed_size/total_weight for x in label_weights]\n # TODO change counts to defaultdicts to avoid key errors\n current_seed_label_count = [0 for _ in range(num_classes)]\n\n while len(labelled_pool_idx) < seed_size:\n sample_idx = random.choice(unlabelled_pool_idx)\n example = data[sample_idx]\n if current_seed_label_count[example['label']] < desired_seed_label_count[example['label']]:\n # add to labelled pool\n labelled_pool_idx.append(sample_idx)\n current_seed_label_count[example['label']] += 1\n # remove from unlabelled pool. TODO more efficient?\n unlabelled_pool_idx = [i for i in range(len(data)) if i not in labelled_pool_idx]\n\n return unlabelled_pool_idx, labelled_pool_idx", "def create_data_sets(reviews, labels, write_to_pickle=True, problem=\"\"):\n def sanity_check(labels):\n print str(len(labels)) + \" total labels. \" + str(sum(labels)) + \" positive labels. \" \\\n + str(len(labels) - sum(labels)) + \" negative labels. 
\"\n\n train_reviews = []\n train_labels = []\n dev_reviews = []\n dev_labels = []\n test_reviews = []\n test_labels = []\n\n total_train = int(len(reviews) * 0.5 / 2) # divided by 2 because of 2 classes\n total_dev = int(len(reviews) * 0.25 / 2)\n\n current_pos_training = 0\n current_neg_train = 0\n current_pos_dev = 0\n current_neg_dev = 0\n\n for (review, vote) in zip(reviews, labels):\n if vote == 1:\n if current_pos_training < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_pos_training += 1\n elif current_pos_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_pos_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Negative review\n else:\n if current_neg_train < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_neg_train += 1\n elif current_neg_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_neg_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Shuffle data for every dataset\n combined_lists = zip(train_reviews, train_labels)\n np.random.shuffle(combined_lists)\n train_reviews, train_labels = zip(*combined_lists)\n\n combined_lists = zip(dev_reviews, dev_labels)\n np.random.shuffle(combined_lists)\n dev_reviews, dev_labels = zip(*combined_lists)\n\n combined_lists = zip(test_reviews, test_labels)\n np.random.shuffle(combined_lists)\n test_reviews, test_labels = zip(*combined_lists)\n\n # Sanity checks\n print \"Total reviews: \" + str(len(reviews))\n print \"Original distribution: \"\n sanity_check(labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(train_labels)\n print \"========================\"\n print \"Dev labels\"\n sanity_check(dev_labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(test_labels)\n\n # Write to pickles\n N = len(reviews)\n if write_to_pickle:\n print \"Writing to pickle...\"\n pickle.dump([train_reviews, train_labels],\n open(\"TrainSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([dev_reviews, dev_labels],\n open(\"DevSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([test_reviews, test_labels],\n open(\"TestSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n print \"Done.\"\n\n return train_reviews, train_labels, dev_reviews, dev_labels, test_reviews, test_labels", "def test_trainGenerator():\n\n # check type\n assert isinstance(trainset, surprise.trainset.Trainset)\n\n # the number of users in trainset should be equal to the user from database plus 1\n assert len(trainset.all_users()) == len(svd.song_df.user_id.unique())+1", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = 
train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test", "def balance_dataset_sampling(instances):\n probabilities = get_balancing_probabilities(instances)\n new_instances = [ (features, classification) \n for features, classification in instances \n if random.random() < probabilities[classification] ]\n\n return new_instances", "def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]", "def split_dataset(X, Y, train_size=0.8):\n if train_size != 1.0:\n return train_test_split(\n X, Y,\n train_size=train_size,\n stratify=Y\n )\n else:\n X_, Y_ = shuffle(\n X, Y\n )\n return X_, [], Y_, []", "def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ", "def get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def cross_validation_datasets(self, fold):\n if fold > len(self): fold = len(self) / 2\n stratified = self.stratified_bunches(fold)\n datasets = []\n for index in range(len(stratified)):\n gold = GoldInstances(training_as_gold(stratified[index]))\n rest = flatten(stratified[:index]) + flatten(stratified[index + 1:])\n training = TrainingInstances(rest)\n datasets.append((training, gold))\n return datasets", "def build_toy_dataset(N):\n y_data = np.random.uniform(-10.5, 10.5, N)\n r_data = np.random.normal(size=N) # random noise\n x_data = np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + r_data * 1.0\n x_data = x_data.reshape((N, 1))\n return train_test_split(x_data, y_data, random_state=42)", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def default_generator(self,\n dataset,\n epochs=1,\n predict=False,\n deterministic=True,\n pad_batches=True):\n for epoch in range(epochs):\n if not predict:\n print('Starting epoch %i' % epoch)\n for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(\n batch_size=self.batch_size,\n deterministic=deterministic,\n pad_batches=pad_batches):\n\n feed_dict = dict()\n if y_b is not None and not predict:\n for index, label in enumerate(self.labels_fd):\n if self.mode == \"classification\":\n feed_dict[label] = to_one_hot(y_b[:, index])\n if self.mode == \"regression\":\n 
feed_dict[label] = y_b[:, index:index + 1]\n if w_b is not None:\n feed_dict[self.weights] = w_b\n # Transform SMILES string to integer vectors\n smiles_seqs = [self.smiles_to_seq(smiles) for smiles in ids_b]\n feed_dict[self.smiles_seqs] = np.stack(smiles_seqs, axis=0)\n yield feed_dict", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def generate_data(data, samples, targeted=True, start=0, inception=True):\n \n assert (targeted==True and start==0 and inception==True)\n \n \n inputs = []\n targets = []\n \n '''\n for i in range(samples):\n if targeted:\n if inception:\n seq = random.sample(range(1,1001), 10)\n else:\n seq = range(data.test_labels.shape[1])\n\n for j in seq:\n if (j == np.argmax(data.test_labels[start+i])) and (inception == False):\n continue\n inputs.append(data.test_data[start+i])\n targets.append(np.eye(data.test_labels.shape[1])[j])\n else:\n inputs.append(data.test_data[start+i])\n targets.append(data.test_labels[start+i])\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n '''\n\n return inputs, targets", "def get_train_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'train')", "def create_train_test_sets(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True):\r\n X_train_indi, y_train_indi = build_dataframe('Individual_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_indi, y_test_indi = build_dataframe('Individual_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_group, y_train_group = build_dataframe('Group_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_group, y_test_group = build_dataframe('Group_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_indi, y_train_indi = subsample_dataframe(X_train_indi, y_train_indi,indi_proportion)\r\n \r\n if incl_group_imgs:\r\n X_train = np.concatenate([X_train_indi,X_train_group])\r\n y_train = np.concatenate([y_train_indi,y_train_group])\r\n else: \r\n X_train = X_train_indi.copy()\r\n y_train = y_train_indi.copy()\r\n\r\n return X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group", "def split_dataset(dataset, test_size):\r\n random.shuffle(dataset)\r\n \r\n rating_negativ = []\r\n rating_positiv = []\r\n \r\n for row in dataset:\r\n if int(row[1]) == 0:\r\n rating_negativ.append(row)\r\n elif int(row[1]) == 1:\r\n rating_positiv.append(row)\r\n\r\n random.shuffle(rating_positiv)\r\n random.shuffle(rating_negativ) \r\n \r\n neg_train_data, neg_val_data = train_test_split(rating_negativ, test_size=test_size)\r\n pos_train_data, pos_val_data = train_test_split(rating_positiv, test_size=test_size)\r\n \r\n train_data = neg_train_data + pos_train_data\r\n val_data = neg_val_data + pos_val_data\r\n \r\n random.shuffle(train_data)\r\n random.shuffle(val_data)\r\n \r\n return train_data, val_data", "def create_simple_data_set(\n n_training_points,\n n_testing_points,\n low=0,\n high=3,\n mode=training_testing_split.SEPERATE,\n kernel=kernel_matern,\n shuffle=True,\n):\n gp = gaussian_process(kernel=kernel, verbose=True)\n\n mid = (low + 
high) / 2\n\n if mode == training_testing_split.SEPERATE_LONG:\n x_training, x_testing = __seperate_long(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.SEPERATE:\n x_training, x_testing = __seperate(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.INTERSPREAD:\n x_training, x_testing = __interspread(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.RANDOM:\n x_training, x_testing = __random(n_training_points, n_testing_points, low, high)\n elif mode == training_testing_split.MIXED:\n\n def r(z):\n dist = np.random.randint(low=1, high=100, size=4)\n λ = lambda x: x / dist.sum()\n vfunc = np.vectorize(λ)\n dist = vfunc(dist)\n return (z * dist).round().astype(int)\n\n training_dist = r(n_training_points)\n testing_dist = r(n_testing_points)\n x1, x2 = __random(training_dist[0], testing_dist[0], low, high)\n x11, x22 = __interspread(training_dist[1], testing_dist[1], low, high)\n x111, x222 = __interspread(training_dist[2], testing_dist[2], low, high)\n x1111, x2222 = __seperate(training_dist[3], testing_dist[3], low, high)\n x_training = np.vstack([x1, x11, x111, x1111])\n x_testing = np.vstack([x2, x22, x222, x222])\n\n y_samples = gp.sample(np.vstack([x_training, x_testing]), 1).squeeze()\n y_training = y_samples[: len(x_training)].reshape(-1, 1)\n y_testing = y_samples[len(x_training) :].reshape(-1, 1)\n training_data_set = data_loader.DataSet(X=x_training, Y=y_training)\n testing_data_set = data_loader.DataSet(X=x_testing, Y=y_testing)\n\n if shuffle:\n training_data_set.shuffle()\n testing_data_set.shuffle()\n\n return training_data_set, testing_data_set", "def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)", "def generate(self, labels, list_IDs, n_classes):\n # Infinite loop\n while 1:\n # Generate order of exploration of dataset\n indexes = self.__get_exploration_order(list_IDs)\n\n # Generate batches\n imax = int(len(indexes)/self.batch_size)\n for i in range(imax):\n # Find list of IDs\n list_IDs_temp = [list_IDs[k] for k in indexes[i*self.batch_size:(i+1)*self.batch_size]]\n print(\"Producing\")\n #print(list_IDs_temp)\n # Generate data\n X, y = self.__data_generation(labels, list_IDs_temp, n_classes)\n # print(X.shape)\n # print(y.shape)\n #print(\"Target Label\")\n #print(y)\n gc.collect()\n yield X, y", "def generate_train_test_data(data_dir = '../../att_faces'):\n\n train_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(1, 36)]\n test_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(36, 41)]\n \n true_combinations_train = generate_true_combinations(train_data)\n false_combinations_train = generate_false_combination(train_data, int(len(true_combinations_train) / len(train_data)), 10)\n \n true_combinations_test = generate_true_combinations(test_data)\n false_combinations_test = generate_false_combination(test_data, int(len(true_combinations_test) / len(test_data)), 10)\n \n return prepare_to_classifier(true_combinations_train, false_combinations_train, true_combinations_test, false_combinations_test)", "def get_generators_from_ds(dataset):\n data_train = tf.data.Dataset.from_tensor_slices(\n (dataset['x_train'], dataset['y_train']))\n data_test = tf.data.Dataset.from_tensor_slices(\n (dataset['x_test'], 
dataset['y_test']))\n\n return data_train, data_test", "def make_data_iterator(dataset, batch_size):\n \n if dataset == '8gaussians':\n scale = 2.\n centers = [\n (1, 0),\n (-1, 0),\n (0, 1),\n (0, -1),\n (1. / np.sqrt(2), 1. / np.sqrt(2)),\n (1. / np.sqrt(2), -1. / np.sqrt(2)),\n (-1. / np.sqrt(2), 1. / np.sqrt(2)),\n (-1. / np.sqrt(2), -1. / np.sqrt(2))\n ]\n centers = [(scale * x, scale * y) for x, y in centers]\n while True:\n dataset = []\n for i in range(batch_size):\n point = np.random.randn(2) * .2\n center = random.choice(centers)\n point[0] += center[0]\n point[1] += center[1]\n dataset.append(point)\n dataset = torch.Tensor(dataset)\n dataset /= 1.414 # stdev\n yield dataset\n \n elif dataset == 'sine':\n while True:\n noise = 0.2\n x = torch.linspace(-4, 4, batch_size, dtype=torch.float32)\n y = np.sin(x) + noise*np.random.randn(*x.shape)\n yield torch.stack([x, y], dim=1)\n \n elif dataset == 'heteroscedastic':\n theta = torch.linspace(0, 2, batch_size)\n x = np.exp(theta)*np.tan(0.1*theta)\n while True:\n b = (0.001 + 0.5 * np.abs(x)) * np.random.normal(1, 1, batch_size)\n y = np.exp(theta)*np.sin(0.1*theta) + b\n yield torch.stack([x, y], dim=1)\n \n elif dataset == 'moon':\n noise = 0.1\n while True:\n data, _ = sklearn.datasets.make_moons(n_samples=batch_size,\n noise=noise)\n yield torch.Tensor(data)\n \n elif dataset == 'helix':\n noise = 0.2\n while True:\n t = torch.linspace(0, 20, batch_size)\n x = np.cos(t)\n x2 = np.sin(t) + noise * np.random.randn(*x.shape)\n \n yield torch.stack([x, x2, t], dim=1)\n \n elif dataset == 'circle':\n while True:\n t = np.random.random(batch_size) * 2 * np.pi - np.pi\n length = 1 - np.random.random(batch_size)*0.4\n x = torch.Tensor(np.multiply(np.cos(t), length))\n y = torch.Tensor(np.multiply(np.sin(t), length))\n \n yield torch.stack([x, y], dim=1)\n\n elif dataset == '2spirals':\n while True:\n z = torch.randn(batch_size, 2)\n n = torch.sqrt(torch.rand(batch_size // 2)) * 540 * (2 * math.pi) / 360\n d1x = - torch.cos(n) * n + torch.rand(batch_size // 2) * 0.5\n d1y = torch.sin(n) * n + torch.rand(batch_size // 2) * 0.5\n x = torch.cat([torch.stack([ d1x, d1y], dim=1),\n torch.stack([-d1x, -d1y], dim=1)], dim=0) / 3\n yield x + 0.1*z", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def generate_dataset(data_dir, train=True, k=100, b=10, s=2):\n img_sampler = get_noisy_sampler(data_dir)\n num_sampler = get_number_sampler(load_mnist(data_dir, 'train' if train else 'test'))\n\n data = []\n idx = 0\n for j in range(1 << b):\n for _ in range(k):\n if (idx + 1) % 1000 == 0 or idx + 1 == k * (1 << b):\n print('%d/%d' % (idx + 1, k * (1 << b)))\n idx += 1\n frames = []\n\n numbers = get_numbers(j)\n\n for i in numbers:\n for _ in range(np.random.randint(s) + 1):\n img = img_sampler.sample((28, 28))\n num = num_sampler.sample(i)\n put_numbers(img, num)\n frames.append(img)\n\n for i in range(25 - len(frames)):\n img = img_sampler.sample((28, 28))\n frames.append(img)\n\n np.random.shuffle(frames)\n video = np.array(frames)\n data.append((video, j))\n return data", "def 
evenly_partition_dataset(data, labels, nb_teachers):\n\n # This will floor the possible number of batches\n batch_len = int(len(data) / nb_teachers)\n\n nclasses = len(labels[0])\n print(\"Start Index Selection\")\n data_sel = [data[labels[:, j] == 1] for j in range(nclasses)]\n print(\"End Index Selection\")\n i = 0\n data_sel_id = [0] * len(labels[0])\n partition_data = []\n partition_labels = []\n\n while True:\n partition_data.append(data_sel[i][data_sel_id[i]])\n partition_labels.append(np_utils.to_categorical(i, nclasses))\n\n if len(partition_data) == batch_len:\n partition_data = np.asarray(partition_data)\n partition_labels = np.asarray(partition_labels)\n yield partition_data, partition_labels\n partition_data = []\n partition_labels = []\n\n data_sel_id[i] += 1\n if data_sel_id[i] == len(data_sel[i]):\n data_sel_id[i] = 0\n i = (i + 1) % nclasses", "def make_fixture(binary=False, balanced=False, split=False):\n kwargs = {\n \"n_samples\": 100,\n \"n_features\": 20,\n \"n_informative\": 8,\n \"n_redundant\": 2,\n \"n_clusters_per_class\": 1,\n \"random_state\": 89092,\n }\n\n if binary:\n kwargs[\"n_classes\"] = 2\n kwargs[\"weights\"] = None if balanced else [0.3, 0.7]\n else:\n kwargs[\"n_classes\"] = 5\n kwargs[\"weights\"] = None if balanced else [0.1, 0.2, 0.4, 0.2, 0.01]\n\n X, y = make_classification(**kwargs)\n\n if split:\n X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=101)\n return Dataset(Split(X_train, X_test), Split(y_train, y_test))\n\n return Dataset(X, y)", "def random_cls_dataset(request):\n set_seed()\n shape = request.param.get('shape', 10)\n size = request.param.get('size', 100)\n X, Y = make_classification(n_samples=2*size, n_features=shape, n_classes=10, n_informative=10, n_redundant=0)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.5)\n Y_train, Y_test = Y_train.astype(np.int64), Y_test.astype(np.int64)\n return (X_train, Y_train), (X_test, Y_test)", "def balance_dataset(dataset):\n \n print(\"Balancing dataset...\")\n n = len(dataset)\n labels = ch.Tensor([dataset[i][1] for i in range(n)]).int()\n n0 = sum(labels).item()\n I_pos = labels == 1\n\n idx = ch.arange(n)\n idx_pos = idx[I_pos]\n ch.manual_seed(0)\n I = ch.randperm(n - n0)[:n0]\n idx_neg = idx[~I_pos][I]\n idx_bal = ch.cat([idx_pos, idx_neg],dim=0)\n return Subset(dataset, idx_bal)", "def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator", "def get_train_full_examples(self, data_dir):\n raise NotImplementedError()", "def minibatch(x_train, y_train, batch_size, train_epochs):\n epoch = 0\n start = 0\n key = random.PRNGKey(0)\n\n while epoch < train_epochs:\n end = start + batch_size\n\n if end > x_train.shape[0]:\n key, split = random.split(key)\n permutation = random.permutation(split,\n np.arange(x_train.shape[0], dtype=np.int64))\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n epoch += 1\n start = 0\n continue\n\n yield x_train[start:end], y_train[start:end]\n start = start + batch_size", "def _sample_mini_dataset_mil(dataset, num_classes, num_shots):\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n gifs, states, 
actions = class_obj.sample(num_shots)\n for shot_idx in range(num_shots):\n start_idx, end_idx = shot_idx*class_obj.T, (shot_idx + 1)*class_obj.T\n g, s, a = gifs[start_idx:end_idx], states[start_idx:end_idx], actions[start_idx:end_idx]\n yield (g, s, a)", "def gen_batches(data, batch_size=8, randomize=False):\n indices = list(range(len(data)))\n targets = [randint(0, N_CLASSES - 1) for _ in indices] # random labels\n if randomize:\n shuffle(indices)\n\n for start in range(0, len(data), batch_size):\n labels = np.array(targets[start:start + batch_size])\n yield (pad_sequences(data[indices[start:start + batch_size]]),\n labels, labels)", "def shuffle_dataset(self):\n # TODO explain approached used for selecting training and test data\n labels = self.dataset.label.unique()\n good_jobs = self.dataset[self.dataset.label == \"Good\"]\n bad_jobs = self.dataset[self.dataset.label == \"Bad\"]\n\n # TODO n>2 probablly won't work the way it's supposed to currently\n if len(labels) == 2:\n # oversample\n resize = max(len(good_jobs.label),len(bad_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label))\n good_jobs_re = good_jobs.sample(resize)\n bad_jobs_re = bad_jobs.sample(resize)\n dataset = pd.concat([good_jobs_re, bad_jobs_re])\n elif len(labels) == 3:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n # oversample\n resize = max(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re])\n elif len(labels) == 4:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n ideal_jobs = self.dataset[self.dataset.label == \"Ideal\"]\n\n # middle of the road approach\n resize = int(mean([len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label),len(ideal_jobs.label)]))\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n ideal_jobs_re = ideal_jobs.sample(resize,replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re,ideal_jobs_re])\n\n train,test = train_test_split(dataset,test_size=0.25,stratify = dataset.label,shuffle=True)\n #test = self.dataset[~self.dataset.isin(train)].dropna()\n #test = self.dataset[(~dataset.label.isin(self.dataset.label))&(~dataset.description.isin(self.dataset.description))]\n #0tr_hashes = [hash(tuple(d)) for d in train.description]\n #ytest = [val for iter,val in self.dataset.iterrows() if hash(tuple(val.description)) not in tr_hashes]\n\n self.y_train,self.y_test = train.label.values,test.label.values\n self.X_train,self.X_test = train.description.values,test.description.values", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\n train = dataset_split == problem.DatasetSplit.TRAIN\n dataset_path = (\"train.tok.clean.bpe.32000\"\n if train else \"newstest2013.tok.bpe.32000\")\n train_path = _get_wmt_ende_bpe_dataset(tmp_dir, dataset_path)\n\n # Vocab\n token_path = os.path.join(data_dir, self.vocab_filename)\n if not tf.gfile.Exists(token_path):\n token_tmp_path = os.path.join(tmp_dir, self.vocab_filename)\n tf.gfile.Copy(token_tmp_path, token_path)\n with tf.gfile.GFile(token_path, mode=\"r\") as f:\n vocab_data = 
\"<pad>\\n<EOS>\\n\" + f.read() + \"UNK\\n\"\n with tf.gfile.GFile(token_path, mode=\"w\") as f:\n f.write(vocab_data)\n\n return text_problems.text2text_txt_iterator(train_path + \".en\",\n train_path + \".de\")", "def generate_dataset(self):\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n\n return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()\n else: # only need shadow dataset for testing\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n\n return dataA_iter, dataA_iter.get_next()", "def balanced_split(dataset, test_size):\n\tprint(\"\\tSplitting data into *balanced* training and test sets\")\n\n\t# Use sklearn.train_test_split to split original dataset into x_train, y_train, x_test, y_test numpy arrays\n\n\tx_train, x_test, y_train, y_test = train_test_split(dataset.drop([\"Date\", \"Trend\"], axis=1).values, dataset[\"Trend\"].values, test_size=test_size, random_state=RANDOM_STATE)\n\n\t# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels\n\ttrain = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])\n\ttrain[\"Trend\"] = pd.Series(y_train)\n\n\t# Do the same for x_test and y__test\n\ttest = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])\n\ttest[\"Trend\"] = pd.Series(y_test)\n\n\t# Apply random undersampling to both data frames\n\ttrain_downsampled = random_undersampling(train)\n\ttest_downsampled = random_undersampling(test)\n\n\ttrain_trend = train_downsampled[\"Trend\"].values\n\ttest_trend = test_downsampled[\"Trend\"].values\n\ttrain_trimmed = train_downsampled.drop([\"Trend\"], axis=1).values\n\ttest_trimmed = test_downsampled.drop([\"Trend\"], axis=1).values\n\n\treturn train_trimmed, test_trimmed, train_trend, test_trend", "def data_generator(batch_size, preprocessor, x, y):\n num_examples = len(x)\n examples = zip(x, y)\n examples = sorted(examples, key = lambda x: x[0].shape[0])\n end = num_examples - batch_size + 1\n batches = [examples[i:i+batch_size]\n for i in range(0, end, batch_size)]\n random.shuffle(batches)\n while True:\n for batch in batches:\n x, y = zip(*batch)\n yield preprocessor.process(x, y)", "def get_datasets(\n self, stage: str, num_samples_per_class: int = None\n ) -> \"OrderedDict[str, Dataset]\":\n num_samples_per_class = num_samples_per_class or 320\n\n datasets = super().get_datasets(stage=stage)\n datasets[\"train\"] = {\n \"dataset\": datasets[\"train\"],\n \"sampler\": BalanceClassSampler(\n labels=datasets[\"train\"].targets, mode=num_samples_per_class\n ),\n }\n return datasets", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = 
shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df", "def generate_data(data, samples, targeted=True, start=0, inception=False):\n inputs = []\n targets = []\n labels = []\n true_ids = []\n for i in range(samples):\n if targeted:\n if inception:\n # for inception, randomly choose 10 target classes\n seq = np.random.choice(range(1, 1001), 1)\n # seq = [580] # grand piano\n else:\n # for CIFAR and MNIST, generate all target classes\n seq = range(data.test_labels.shape[1])\n\n # print ('image label:', np.argmax(data.test_labels[start+i]))\n for j in seq:\n # skip the original image label\n if (j == np.argmax(data.test_labels[start + i])) and (inception == False):\n continue\n inputs.append(data.test_data[start + i])\n targets.append(np.eye(data.test_labels.shape[1])[j])\n labels.append(data.test_labels[start + i])\n true_ids.append(start + i)\n else:\n inputs.append(data.test_data[start + i])\n targets.append(data.test_labels[start + i])\n labels.append(data.test_labels[start + i])\n true_ids.append(start + i)\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n labels = np.array(labels)\n true_ids = np.array(true_ids)\n\n return inputs, targets, labels, true_ids", "def genTrainingSet(set_of_CSVs, file_to_classify, train_size = 5):\n set_of_csvs_minus_target = copy.copy(set_of_CSVs)\n # remove the file we want to classify\n set_of_csvs_minus_target.remove(file_to_classify)\n\n # extract out the random noise files\n # first, set the seed\n random.seed(time.time())\n # now sample\n return_list = random.sample(set_of_csvs_minus_target, train_size)\n return return_list", "def divide_train_test(self, sentences, tags):\n logging.info('Dividindo dataset em 10 folds')\n kf = KFold(n_splits=10)\n train, test = [], []\n for train_index, test_index in kf.split(sentences):\n train.append(train_index)\n test.append(test_index)\n return train, test", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def load_data_train(self, shuffle=True):\n\n data, label = self._generate_all_combinations_of_stripe_images(shuffle=shuffle);\n\n return data, label;", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = 
documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def balanced_sampling(dat: pd.DataFrame, logger=None):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n \n # upsampling\n logger.info('Start balanced sampling')\n subsample = []\n num_of_each_class = dat.iloc[:, -1].value_counts().to_numpy()\n if num_of_each_class.std()*1.0 / num_of_each_class.mean() < 0.1:\n logger.info('The given data is balance.')\n # the dataset is balanced\n return dat\n logger.info('Given dataset is unbalance')\n logger.info('Sampling data from each class to generate a new dataset')\n n_smp = num_of_each_class.max()\n for label in dat.iloc[:, -1].value_counts().index:\n samples = dat[dat.iloc[:, -1] == label]\n num_samples = len(samples)\n index_range = range(num_samples)\n # take all from the set\n indexes = list(np.random.choice(index_range, size=num_samples, replace=False))\n indexes2 = list(np.random.choice(\n index_range, size=n_smp-num_samples, replace=True)) # add random items\n indexes.extend(indexes2)\n subsample.append(samples.iloc[indexes, :])\n logger.info('End with sampling')\n out = pd.concat(subsample)\n out = out.sample(frac=1).reset_index(drop=True) # shuffle and re index\n return out", "def generate_data(data, samples, targeted=True, start=0, inception=False):\n inputs = []\n targets_1hot = []\n i = 0\n samples_sofar = 0\n while samples_sofar < samples:\n i += 1\n if torch.argmax(model(torch.tensor(data.test_data[start+i:start+i+1]+0.5, device=\"cuda\", dtype=torch.float32).permute(0, 3, 1, 2))) != np.argmax(data.test_labels_1hot[start+i]):\n continue\n\n if targeted:\n if inception:\n seq = random.sample(range(1, 1001), 10)\n else:\n seq = range(data.test_labels_1hot.shape[1])\n\n # print ('image label:', torch.argmax(data.test_labels[start+i]))\n for j in seq:\n # skip the original image label\n if (j == torch.argmax(data.test_labels_1hot[start+i])) and (inception == False):\n continue\n inputs.append(data.test_data[start+i])\n targets_1hot.append(\n torch.eye(data.test_labels_1hot.shape[1])[j])\n else:\n inputs.append(data.test_data[start+i])\n targets_1hot.append(data.test_labels_1hot[start+i])\n\n samples_sofar += 
1\n\n inputs = torch.tensor(inputs).permute(0, 3, 1, 2)\n targets_1hot = torch.tensor(targets_1hot)\n\n return inputs, targets_1hot", "def createTrainTestSets():\n tweets = open(noDuplicatesFilename, 'r').read().splitlines()\n name_mapping = loadNameMapping()\n holdoutLocations = [u'Frederiksberg, Danmark', u'T\\xe5rnby, Danmark', u'Kolding, Danmark', u'T\\xe4by, Sverige', u'Kungsbacka, Sverige', u'Kristianstad, Sverige', u'Bod\\xf8, Norge', u'Kvinnherad, Norge', u'Ullensaker, Norge']\n testSetLocation = []\n rest = []\n for tweet in tweets:\n if stringToTweet(tweet).getFullName() in holdoutLocations:\n testSetLocation.append(tweet)\n else:\n rest.append(tweet)\n tweets = rest\n testIndex = int(round(len(tweets) * (1 - test_set_ratio)))\n random.seed(1)\n random.shuffle(tweets)\n trainSet = tweets[:testIndex]\n testSet = tweets[testIndex:]\n open(trainSetFilename, 'w').write('\\n'.join(trainSet))\n open(testSetNormalFilename, 'w').write('\\n'.join(testSet))\n open(testSetLocationFilename, 'w').write('\\n'.join(testSetLocation))\n print \"Wrote %d tweets to train set\" % len(trainSet)\n print \"Wrote %d tweets to normal test set\" % len(testSet)\n print \"Wrote %d tweets to location test set\" % len(testSetLocation)", "def read_data_sets(data_path, fake_data=False, one_hot=False,\n validation_size=5000, source_url={},\n augment=False,\n percentage_train=100.,\n unbalance=False, unbalance_dict={\"percentage\": 20, \"label1\": 0, \"label2\": 8},\n ):\n\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n if fake_data:\n data_sets.train = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.validation = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.test = DataSet([], [], fake_data=True, one_hot=True)\n return data_sets\n\n if not source_url: # empty string check\n if 'fashion' in data_path:\n source_url = DEFAULT_SOURCE_URL_FASHION\n else:\n source_url = DEFAULT_SOURCE_URL_MNIST\n\n if 'fashion' in data_path or 'mnist' in data_path: # mnist or fashion\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_mnist(data_path, validation_size, source_url, one_hot)\n reshape = True\n else:\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_medical_data(data_path)\n reshape = False\n\n # add random permutation to train & validation\n np.random.seed(42)\n\n n_train = train_images.shape[0]\n perm = np.random.permutation(n_train)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n\n n_val = val_images.shape[0]\n perm = np.random.permutation(n_val)\n val_images = val_images[perm]\n val_labels = val_labels[perm]\n\n # For experiments with data-augmentation\n if augment:\n if 'fashion' in data_path: # rotations +-10 and horizontal flips\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=True)\n elif 'mnist' in data_path: # rotations +-10\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=False)\n train_images = np.concatenate([train_images, np.expand_dims(augmented_images, 3)])\n train_labels = np.concatenate([train_labels, augmented_labels])\n # for the medical datasets, you can use the \"augment\" argument while doing patch extraction\n\n # For experiments with limited amount of data\n if percentage_train != 100.:\n train_size = int(0.01*percentage_train*train_images.shape[0])\n Xtrain_images, Xval_images, ytrain, yval = train_test_split(train_images, train_labels, train_size=train_size)\n train_images = 
Xtrain_images\n train_labels = ytrain\n\n # For experiments with class-imbalance distribution\n if unbalance:\n n_classes = len(np.unique(np.argmax(train_labels, 1)))\n reduceto = 0.01*unbalance_dict['percentage']\n label1 = unbalance_dict['label1']\n label2 = unbalance_dict['label2']\n\n pick_ids = []\n newsize = 0\n all_classes = np.arange(0, n_classes)\n all_classes = np.delete(all_classes, np.where(all_classes == label1)[0])\n all_classes = np.delete(all_classes, np.where(all_classes == label2)[0])\n\n for lab in [label1, label2]:\n allids = np.where(np.argmax(train_labels, 1) == lab)[0]\n selectedids = np.random.choice(allids, int(reduceto * allids.shape[0]), replace=False)\n pick_ids.append(selectedids)\n newsize += len(selectedids)\n\n new_ids = convert_list_to_array(pick_ids, newsize)\n\n other_ids = []\n othersize = 0\n for lab in all_classes.tolist():\n selectedids = np.where(np.argmax(train_labels, 1) == lab)[0]\n other_ids.append(selectedids)\n othersize += len(selectedids)\n\n keep_ids = convert_list_to_array(other_ids, othersize)\n\n # new_ids: contains the indices of the reduced (imbalance) classes\n # keep_ids: contains the indices of the rest (keep the same class distribution)\n resulting_ids = np.concatenate((new_ids, keep_ids))\n np.random.shuffle(resulting_ids)\n\n train_images = train_images[resulting_ids, ...]\n train_labels = train_labels[resulting_ids, ...]\n\n data_sets.train = DataSet(train_images, train_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.validation = DataSet(val_images, val_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.test = DataSet(test_images, test_labels, fake_data=True, one_hot=True, reshape=reshape)\n\n return data_sets", "def sample_train_batch(self):\r\n batch = []\r\n labels =[]\r\n num_groups = self.batch_size // self.batch_k\r\n sampleed_classes = np.random.choice(self.train_class_ids,num_groups,replace=False)\r\n for class_id in sampleed_classes:\r\n img_fname = np.random.choice(self.train_image_files[class_id],self.batch_k,replace=False)\r\n batch += img_fname.tolist()\r\n labels += [class_id]*self.batch_k\r\n return batch,labels", "def shuffle_and_split_data(X_genesets, y, train_size, validate_size):\n permutation = np.random.permutation(y.size)\n y_permuted = y[permutation]\n X_genesets_permuted = [Xg[permutation, :] for Xg in X_genesets]\n X_groups_train = [Xg[0:train_size, :] for Xg in X_genesets_permuted]\n X_groups_validate = [Xg[train_size: validate_size + train_size, :] for Xg in X_genesets_permuted]\n X_groups_test = [Xg[validate_size + train_size:, :] for Xg in X_genesets_permuted]\n y_train = y_permuted[0:train_size]\n y_validate = y_permuted[train_size: validate_size + train_size]\n y_test = y_permuted[validate_size + train_size:]\n return X_groups_train, y_train, X_groups_validate, y_validate, X_groups_test, y_test", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def test_dataset():\n # File paths\n intervals_file = \"example_files/intervals.bed\"\n target_file = \"example_files/targets.tsv\"\n gtf_file = \"example_files/gencode.v24.annotation_chr22.gtf\"\n fasta_file = \"example_files/hg38_chr22.fa\"\n ds = SeqDistDataset(intervals_file, fasta_file, gtf_file, target_file)\n\n ds[0]\n ds[10]\n it = ds.batch_iter(32)\n next(it)", "def generate_datasets(self, positive_data_directory: str = 'positive', negative_data_directory: str = 'negative'\n ) -> (tf.data.Dataset, tf.data.Dataset):\n 
positive_example_paths = list(self.data_directory.joinpath(positive_data_directory).glob('*.feather'))\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = list(self.data_directory.joinpath(negative_data_directory).glob('*.feather'))\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n load_and_preprocess_function = lambda file_path: tuple(\n tf.py_function(self.load_and_preprocess_example_file, [file_path], [tf.float32, tf.int32]))\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_dataset = training_dataset.map(load_and_preprocess_function, num_parallel_calls=16)\n training_dataset = training_dataset.map(self.set_shape_function, num_parallel_calls=16)\n training_dataset = training_dataset.batch(self.batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n validation_dataset = validation_dataset.map(load_and_preprocess_function, num_parallel_calls=16)\n validation_dataset = validation_dataset.map(self.set_shape_function, num_parallel_calls=16)\n validation_dataset = validation_dataset.batch(self.batch_size).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def get_data_generator(train_data, validation_data):\n\n def batch_generator(mode=\"train\", batch_size=100):\n assert mode in [\"train\", \"val\"], \"The mode should be in {train, val}.\"\n if mode == \"train\":\n data = train_data.copy()\n elif mode == \"val\":\n data = validation_data.copy()\n\n while True:\n indices = np.random.permutation(np.arange(len(data)))\n data = data[indices]\n\n for i in range(len(data) // batch_size):\n yield data[i * batch_size:(i + 1) * batch_size]\n\n return batch_generator", "def get_samples(self):\n result = []\n segmentsize=30\n # Reduce this to very little to get very large trainingsets\n stride=5\n noOfBuckets=40\n for start in range(0, len(self.data) - segmentsize, stride):\n if start + segmentsize <= len(self.data):\n segments_buckets = self.get_buckets(start, start + segmentsize, noOfBuckets)\n result.append(segments_buckets)\n return result", "def load_train_test_transactions(train_size=0.7):\n X, y = features_target_split()\n X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=train_size, random_state=7)\n print('\\nTraining and testing data creation successful\\n')\n return X_train, X_test, y_train,y_test", "def main(unused_argv):\n del unused_argv\n if not os.path.exists(FLAGS.data_dir):\n os.makedirs(FLAGS.data_dir)\n\n tfds_cached_dict = {}\n data_dir = FLAGS.tfds_data_dir if FLAGS.tfds_data_dir else None\n name = FLAGS.dataset_name\n tfds_cached_dict[name] = tfds.load(name, batch_size=-1, data_dir=data_dir)\n dataset_dict = tfds_cached_dict[name]\n 
dataset_dict[tfds.Split.TRAIN] = tfds.as_numpy(\n dataset_dict[tfds.Split.TRAIN])\n dataset_dict[tfds.Split.TEST] = tfds.as_numpy(\n dataset_dict[tfds.Split.TEST])\n # To mock the API of tfds.load to cache the downloaded datasets.\n # Used as an argument to `get_dataset`.\n def load_fn(name, data_dir=None, batch_size=-1):\n # This function will always return the whole dataset.\n assert batch_size == -1\n del data_dir\n del batch_size\n return tfds_cached_dict[name]\n class_ids = sorted([int(x) for x in FLAGS.class_ids])\n num_classes = len(class_ids)\n for i in range(num_classes):\n for j in range(i+1, num_classes):\n print('Generating pos {} neg {}'.format(i, j))\n positive_class = class_ids[i]\n negative_class = class_ids[j]\n random_seeds = range(FLAGS.min_data_seed, FLAGS.max_data_seed)\n for seed in random_seeds:\n dataset = create_projected_binary_dataset(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.num_train_examples, FLAGS.num_valid_examples,\n FLAGS.num_test_examples, FLAGS.projected_dim, seed, load_fn)\n filename = 'binary_{}-pos_{}-neg_{}-dim_{}-seed_{}'.format(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.projected_dim, seed)\n serialized_dataset = dataset.SerializeToString()\n\n with open(os.path.join(FLAGS.data_dir, filename), 'wb') as f:\n f.write(serialized_dataset)", "def build_data(seed):\n rs = np.random.RandomState(seed)\n\n def y(x):\n \"\"\" y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4 \"\"\"\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4\n\n xtrain = rs.rand(10000, 4)\n xtest = rs.rand(1000, 4)\n ytrain = y(xtrain) + rs.rand(10000) / 10\n ytest = y(xtest) + rs.rand(1000) / 10\n return xtrain, xtest, ytrain, ytest", "def split_dataset(samples, ratio=0.8):\n nsamples = len(samples)\n num_train = int(ratio*nsamples)\n\n # shuffle samples\n shuffle(samples)\n\n trainset = samples[:num_train]\n testset = samples[num_train:]\n\n return trainset, testset", "def minibatches(dataset: List[T],\n batch_size: int,\n shuffle: bool = True) -> Iterator[List[T]]:\n # start indexes 0, batch_size, 2 * batch_size, ...\n batch_starts = [start for start in range(0, len(dataset),batch_size)]\n\n if shuffle: random.shuffle(batch_starts) #shuffle the butches\n\n for start in batch_starts:\n end = start + batch_size\n yield dataset[start: end]", "def build_train_generator(X: numpy.array, y: numpy.array,\n batch_size: int = 500) -> Iterable[Tuple[numpy.array]]:\n assert X.shape[0] == y.shape[0], \"Number of samples mismatch in X and y.\"\n\n def xy_generator():\n while True:\n n_batches = X.shape[0] // batch_size\n if n_batches * batch_size < X.shape[0]:\n n_batches += 1 # to yield last samples\n for i in range(n_batches):\n start = i * batch_size\n end = min((i + 1) * batch_size, X.shape[0])\n yield X[start:end], y[start:end]\n return xy_generator()", "def test_intent_classifier_get_training_samples(self):\n pass", "def get_dataset(filename, target_name, training_fraction):\n csv_reader = csv.DictReader(open(filename))\n attributes_names = [attribute_name for attribute_name in csv_reader.fieldnames\n if attribute_name != target_name]\n train_dataset = Dataset(attributes_names, target_name)\n test_dataset = Dataset(attributes_names, target_name)\n instances = []\n for row in csv_reader:\n instances.append(([row[attribute_name] for attribute_name in attributes_names],\n row[target_name]))\n all_instances_indexes = set(range(0, len(instances)))\n 
number_of_training_instances = int(len(all_instances_indexes) * training_fraction)\n training_instances_indexes = set(sample(all_instances_indexes, number_of_training_instances))\n test_instances_indexes = all_instances_indexes.difference(training_instances_indexes)\n for instance_index in training_instances_indexes:\n train_dataset.add_instance(instances[instance_index][0], instances[instance_index][1])\n for instance_index in test_instances_indexes:\n test_dataset.add_instance(instances[instance_index][0], instances[instance_index][1])\n return train_dataset, test_dataset", "def maybe_generate_data(data_dir,\n shape=None,\n num_examples=None,\n stone_probability=0.45,\n num_files=2):\n dest_dir = os.path.join(data_dir, \"batches-bin\")\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n # Log hook to measure progress\n # TODO: not in use\n def _progress(count, block_size, total_size):\n sys.stdout.write(\"\\r>> Generating %s %.1f%%\" % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n # generate training batches\n # constrained\n filenames = [\"data_batch_%d.bin\" % i for i in range(num_files)]\n for filename in filenames:\n filepath = os.path.join(dest_dir, filename)\n if not os.path.exists(filepath):\n print(\"%s not found - generating...\" % filename)\n x, y = generate_constrained_dataset(_progress, **{\n \"num_examples\": num_examples or NUM_EXAMPLES,\n \"stone_probability\": stone_probability,\n \"shape\": shape})\n _convert_to_tfrecords(x, shape, y, filepath)\n print()\n statinfo = os.stat(filepath)\n print(\"Successfully generated\", filename,\n statinfo.st_size, \"bytes.\")\n\n # generate testing batches\n # random\n # TODO: generate random dataset\n filenames = [\"test_batch_%d.bin\" % i for i in range(num_files)]\n for filename in filenames:\n filepath = os.path.join(dest_dir, filename)\n if not os.path.exists(filepath):\n print(\"%s not found - generating...\" % filename)\n # utils.generate_dataset(filepath, _progress, **{\n x, y = generate_constrained_dataset(_progress, **{\n \"num_examples\": num_examples or NUM_EXAMPLES,\n \"stone_probability\": stone_probability,\n \"shape\": shape})\n _convert_to_tfrecords(x, shape, y, filepath)\n print()\n statinfo = os.stat(filepath)\n print(\"Successfully generated\", filename,\n statinfo.st_size, \"bytes.\")", "def create_train_test_sets(self,x,y,lenTest):\n \n nbInd = x.shape[0]\n shuffler = np.random.permutation(nbInd)\n x_train = x[shuffler][0:(nbInd-lenTest),]\n y_train = y[shuffler][0:(nbInd-lenTest),]\n\n x_test = x[shuffler][(nbInd-lenTest):nbInd,]\n y_test = y[shuffler][(nbInd-lenTest):nbInd,]\n\n return x_train,y_train,x_test,y_test", "def get_data(generator, random, bench_id):\n x_train, y_train, x_test, y_test = generator(random, bench_id)\n x_train = np.c_[np.ones(len(x_train)), x_train]\n x_test = np.c_[np.ones(len(x_test)), x_test]\n return x_train, y_train, x_test, y_test", "def get_dataset(dataset_dir, split_name, batch_size, workers):\n folder = os.path.join(dataset_dir, '{}_*.tfrecord'.format(split_name))\n filenames = tf.data.Dataset.list_files(folder)\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.shuffle(1000)\n dataset = dataset.repeat()\n dataset = dataset.map(preprocess, num_parallel_calls=workers)\n dataset = dataset.apply(\n tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(2)\n\n filename = '{}.txt'.format(split_name)\n with open(os.path.join(dataset_dir, filename), 'r') as f:\n examples = 
int(f.read().strip())\n\n return dataset.make_one_shot_iterator(), examples", "def partition_dataset_train():\n dataset = datasets.MNIST(\n './data',\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ]))\n size = dist.get_world_size()\n bsz = int(128 / float(size))\n partition_sizes = [1.0 / size for _ in range(size)]\n partition = DataPartitioner(dataset, partition_sizes)\n partition = partition.use(dist.get_rank())\n train_set = torch.utils.data.DataLoader(\n partition, batch_size=bsz, shuffle=True)\n return train_set, bsz", "def rand_checkers(n1=100, n2=100, n3=100, n4=100, sigma=0.1):\n nb1 = n1 // 8\n nb2 = n2 // 8\n nb3 = n3 // 8\n nb4 = n4 // 8\n\n xapp = np.reshape(np.zeros((nb1 + nb2 + nb3 + nb4) * 16),\n [(nb1 + nb2 + nb3 + nb4) * 8, 2])\n yapp = np.ones((nb1 + nb2 + nb3 + nb4) * 8)\n idx = 0\n nb = 2 * nb1\n for i in range(-2, 2):\n for j in range(-2, 2):\n yapp[idx:(idx + nb)] = [fmod(i - j + 100, 4)] * nb\n xapp[idx:(idx + nb), 0] = np.random.rand(nb)\n xapp[idx:(idx + nb), 0] += i + sigma * np.random.randn(nb)\n xapp[idx:(idx + nb), 1] = np.random.rand(nb)\n xapp[idx:(idx + nb), 1] += j + sigma * np.random.randn(nb)\n idx += nb\n\n ind = np.arange((nb1 + nb2 + nb3 + nb4) * 8)\n np.random.shuffle(ind)\n res = np.hstack([xapp, yapp[:, np.newaxis]])\n return np.array(res[ind, :])", "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def partition_train_valid_test2(data, classes, others, ratio=(1,1,1), rng=np.random.RandomState(1000)):\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n train_set_x=data[train_ind]\n train_set_y=classes[train_ind]\n if others is not None:\n train_set_others=others[train_ind]\n else:\n train_set_others=None\n 
valid_set_x=data[valid_ind]\n valid_set_y=classes[valid_ind]\n if others is not None:\n valid_set_others=others[valid_ind]\n else:\n valid_set_others=None\n test_set_x=data[test_ind]\n test_set_y=classes[test_ind]\n if others is not None:\n test_set_others=others[test_ind]\n else:\n test_set_others=None\n \n return train_set_x,train_set_y,train_set_others,valid_set_x,valid_set_y,valid_set_others,test_set_x,test_set_y,test_set_others", "def balance_classes(data, labels):\n\n index_dict = {}\n\n for idx, label in enumerate(labels):\n if label not in index_dict:\n index_dict[label] = [idx]\n else:\n index_dict[label] += [idx]\n\n index_list = list(index_dict.values())\n\n min_balanced_number = min([len(l) for l in index_list])\n\n index_to_take_list = np.concatenate([\n np.random.choice(l, min_balanced_number, replace=False)\n for l in index_list\n ])\n\n np.random.shuffle(index_to_take_list)\n\n return data[index_to_take_list], labels[index_to_take_list]", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()" ]
[ "0.6719611", "0.66851825", "0.668324", "0.65884167", "0.6578879", "0.6477982", "0.6476447", "0.6440723", "0.6435114", "0.6385229", "0.6352234", "0.634321", "0.634212", "0.63130134", "0.63006437", "0.62959516", "0.6281407", "0.6270701", "0.6244588", "0.62174755", "0.6190903", "0.61715853", "0.61578935", "0.6130546", "0.6108114", "0.61079377", "0.6098519", "0.60940254", "0.60734123", "0.60661995", "0.6053324", "0.6047818", "0.6037834", "0.60364425", "0.6036004", "0.60159177", "0.60131174", "0.59906274", "0.59884197", "0.59858537", "0.5985719", "0.59832984", "0.59832", "0.598218", "0.5976228", "0.5973297", "0.59674644", "0.59653294", "0.5961315", "0.5957308", "0.59565055", "0.59529555", "0.5942049", "0.5940275", "0.5932622", "0.5926122", "0.5924836", "0.5922939", "0.591868", "0.591774", "0.59142655", "0.5911301", "0.5905122", "0.5904974", "0.59048694", "0.5902382", "0.5896984", "0.5893908", "0.5882223", "0.58771205", "0.5874752", "0.5874048", "0.58643043", "0.5864014", "0.5853048", "0.5850253", "0.5850253", "0.58500487", "0.58442813", "0.5843353", "0.58303285", "0.58263284", "0.5824059", "0.5823546", "0.5823436", "0.58192825", "0.5816421", "0.5815415", "0.58134985", "0.5812287", "0.58120155", "0.5808137", "0.5808105", "0.58055526", "0.5802767", "0.5797741", "0.5794688", "0.5785916", "0.57809544", "0.5779856" ]
0.6441262
7
Generates a balanced set of training examples from one or more datasets.
def generate_dataset( datasets, networks, parents, mode='train', mean=None, verbose=1, **params): # Parameters classes = params.setdefault('classes', [-1,0,1]) data_type = params.setdefault('data_type', 'spikes') thres = params.setdefault('thres', 150.0) target = params.setdefault('target', int(1.2e6)) valid_split = params.setdefault('valid_split', 0.1) slice_len = params.setdefault('slice_len', 330) assert len(datasets) == len(networks) == len(parents) examples = np.zeros((target, 5, slice_len, 1)) labels = np.zeros((target, len(classes))) ex_per_netw = target//len(datasets) params['target'] = ex_per_netw for i in range(len(datasets)): if verbose > 0: print('Network {} of {}'.format(i+1, len(datasets))) data = datasets[i] network = networks[i] parents_ = parents[i] if data_type == 'spikes': ds_data = downsample_spikes(data, thres=thres, verbose=verbose) elif data_type == 'fluorescence': ds_data = downsample_fluorescence( data, thres=thres, verbose=verbose) else: raise ValueError('Invalid data type') start = i*ex_per_netw end = (i+1)*ex_per_netw examples[start:end], labels[start:end] = get_examples( ds_data, network, parents_, verbose=verbose, **params) shuffle_idx = np.random.permutation(np.arange(examples.shape[0])) examples = examples[shuffle_idx] labels = labels[shuffle_idx] if mode == 'train': idx = int(examples.shape[0]*valid_split) ex_valid, ex_train = np.split(examples, [idx], axis=0) lbl_valid, lbl_train = np.split(labels, [idx], axis=0) mean = np.mean(ex_train, axis=0) ex_train -= mean ex_valid -= mean return ex_train, ex_valid, lbl_train, lbl_valid, mean elif mode == 'test': assert mean != None examples -= mean return examples, labels else: raise ValueError('Invalid mode')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def create_data_sets(reviews, labels, write_to_pickle=True, problem=\"\"):\n def sanity_check(labels):\n print str(len(labels)) + \" total labels. 
\" + str(sum(labels)) + \" positive labels. \" \\\n + str(len(labels) - sum(labels)) + \" negative labels. \"\n\n train_reviews = []\n train_labels = []\n dev_reviews = []\n dev_labels = []\n test_reviews = []\n test_labels = []\n\n total_train = int(len(reviews) * 0.5 / 2) # divided by 2 because of 2 classes\n total_dev = int(len(reviews) * 0.25 / 2)\n\n current_pos_training = 0\n current_neg_train = 0\n current_pos_dev = 0\n current_neg_dev = 0\n\n for (review, vote) in zip(reviews, labels):\n if vote == 1:\n if current_pos_training < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_pos_training += 1\n elif current_pos_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_pos_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Negative review\n else:\n if current_neg_train < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_neg_train += 1\n elif current_neg_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_neg_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Shuffle data for every dataset\n combined_lists = zip(train_reviews, train_labels)\n np.random.shuffle(combined_lists)\n train_reviews, train_labels = zip(*combined_lists)\n\n combined_lists = zip(dev_reviews, dev_labels)\n np.random.shuffle(combined_lists)\n dev_reviews, dev_labels = zip(*combined_lists)\n\n combined_lists = zip(test_reviews, test_labels)\n np.random.shuffle(combined_lists)\n test_reviews, test_labels = zip(*combined_lists)\n\n # Sanity checks\n print \"Total reviews: \" + str(len(reviews))\n print \"Original distribution: \"\n sanity_check(labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(train_labels)\n print \"========================\"\n print \"Dev labels\"\n sanity_check(dev_labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(test_labels)\n\n # Write to pickles\n N = len(reviews)\n if write_to_pickle:\n print \"Writing to pickle...\"\n pickle.dump([train_reviews, train_labels],\n open(\"TrainSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([dev_reviews, dev_labels],\n open(\"DevSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([test_reviews, test_labels],\n open(\"TestSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n print \"Done.\"\n\n return train_reviews, train_labels, dev_reviews, dev_labels, test_reviews, test_labels", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def build_all_datasets(\n cfg, tokenizer, 
train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset", "def inputs_balanced(batch_size, fake_data=False, one_hot=False, dtype=tf.float32, eval_data=False):\n class DataSets(object):\n pass\n data_sets = DataSets()\n if fake_data:\n def fake():\n return DataSetBalanced([], [], batch_size, fake_data=True, one_hot=one_hot, dtype=dtype, eval_data=eval_data)\n data_sets.train = fake()\n data_sets.validation = fake()\n data_sets.test = fake()\n return data_sets\n\n #testing = dict()\n validation = dict()\n training = dict()\n validation_labels = dict()\n #testing_labels = dict()\n training_labels = dict()\n if USE_MULTIPLE_FILES:\n validation, validation_labels = create_data_set(VALIDATION_FILE_LOCATION, eval_data)\n if not eval_data:\n training, training_labels = create_data_set(FILE_LOCATION, eval_data)\n #### HACK: I needed to do this so there would be some strange eosinophil in the validation data ####\n validation['strange_eosinophils'] = training['strange_eosinophils'][0:10]\n validation_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][0:10]\n training['strange_eosinophils'] = training['strange_eosinophils'][10:]\n training_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][10:]\n else:\n VALIDATION_SIZE = 20\n #TESTING_SIZE = 1\n data_examples = np.load(os.path.join(DATA_LOCATION, FILE_LOCATION))\n for name in cell_names:\n print(\"data_examples\")\n print(name+\":\"+str(data_examples[name].shape[0]))\n for i, name in enumerate(cell_names):\n if not eval_data:\n # make the random data consistent across runs\n np.random.seed(1)\n # Shuffle the data\n perm = np.arange(data_examples[name].shape[0])\n np.random.shuffle(perm)\n randomized_data = data_examples[name][perm]\n else:\n randomized_data = data_examples[name]\n validation[name] = randomized_data[:VALIDATION_SIZE]\n #testing[name] = randomized_data[VALIDATION_SIZE:VALIDATION_SIZE+TESTING_SIZE]\n if not eval_data:\n training[name] = randomized_data[VALIDATION_SIZE:]\n #training[name] = randomized_data[VALIDATION_SIZE+TESTING_SIZE:]\n training_labels[name] = to_categorical(np.full((training[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n validation_labels[name] = to_categorical(np.full((validation[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n #testing_labels[name] = to_categorical(np.full((testing[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n\n data_sets.validation = DataSetBalanced(validation, validation_labels, batch_size, fake_data=False, one_hot=True,\n 
dtype=tf.uint8, eval_data=eval_data)\n #data_sets.testing = DataSetBalanced(testing, testing_labels, batch_size, fake_data=False, one_hot=True, dtype=tf.uint8, eval_data=eval_data)\n if not eval_data:\n data_sets.train = DataSetBalanced(training, training_labels, batch_size, fake_data=False, one_hot=True,\n dtype=tf.uint8, eval_data=eval_data)\n\n return data_sets", "def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator", "def generate_datasets(self, positive_data_directory: str = 'positive', negative_data_directory: str = 'negative'\n ) -> (tf.data.Dataset, tf.data.Dataset):\n positive_example_paths = list(self.data_directory.joinpath(positive_data_directory).glob('*.feather'))\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = list(self.data_directory.joinpath(negative_data_directory).glob('*.feather'))\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n load_and_preprocess_function = lambda file_path: tuple(\n tf.py_function(self.load_and_preprocess_example_file, [file_path], [tf.float32, tf.int32]))\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_dataset = training_dataset.map(load_and_preprocess_function, num_parallel_calls=16)\n training_dataset = training_dataset.map(self.set_shape_function, num_parallel_calls=16)\n training_dataset = training_dataset.batch(self.batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n validation_dataset = validation_dataset.map(load_and_preprocess_function, num_parallel_calls=16)\n validation_dataset = validation_dataset.map(self.set_shape_function, num_parallel_calls=16)\n validation_dataset = validation_dataset.batch(self.batch_size).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def create_train_test_sets(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True):\r\n X_train_indi, y_train_indi = build_dataframe('Individual_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_indi, y_test_indi = build_dataframe('Individual_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_group, y_train_group = build_dataframe('Group_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_group, y_test_group = build_dataframe('Group_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_indi, y_train_indi = subsample_dataframe(X_train_indi, y_train_indi,indi_proportion)\r\n \r\n if incl_group_imgs:\r\n X_train = np.concatenate([X_train_indi,X_train_group])\r\n y_train = np.concatenate([y_train_indi,y_train_group])\r\n else: \r\n X_train = X_train_indi.copy()\r\n y_train = y_train_indi.copy()\r\n\r\n return X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def generate_datasets(self, rand=None, *args, **kwargs):\n raise NotImplementedError()", "def get_examples(ds_data, network, parents, verbose=1, **params):\n # Parameters\n classes = params.setdefault('classes', [-1,0,1])\n target = params.setdefault('target', int(1.2e6))\n slice_len = params.setdefault('slice_len', 330)\n \n assert not target % len(classes)\n \n G = np.mean(ds_data, axis=0) \n examples = np.zeros((target, 5, slice_len, 1))\n labels = np.zeros((target, len(classes)))\n count = 0\n \n if verbose > 0:\n print('Generating {} training examples'.format(target))\n bar = pb.ProgressBar(max_value=target,\n widgets=[pb.Percentage(), ' - ',\n pb.Bar(), ' - ',\n pb.ETA()])\n \n for c in classes:\n \n pairs = np.argwhere(network == c)\n reps = int(target/len(classes)/pairs.shape[0]) + 1\n pair_idx = np.repeat(np.arange(pairs.shape[0]), reps)\n pair_idx = np.random.permutation(pair_idx)[:target//len(classes)]\n start_idx = np.random.randint(\n 0, ds_data.shape[1]-slice_len, size=target//len(classes))\n \n for i in range(pair_idx.size):\n \n n1 = pairs[pair_idx[i]][0]\n n2 = pairs[pair_idx[i]][1]\n assert(network[n1,n2] == c)\n \n start = start_idx[i]\n end = start + slice_len\n \n p1 = np.mean(ds_data[parents[n1], start:end], axis=0)\n p2 = np.mean(ds_data[parents[n2], start:end], axis=0)\n \n examples[count,:,:,0] = np.vstack((\n p1, \n ds_data[n1][start:end], \n G[start:end], \n ds_data[n2][start:end], \n p2\n ))\n \n labels[count,:] = 
np.equal(classes, c, dtype=np.int32)\n \n if verbose > 0:\n bar.update(count)\n count +=1\n \n if verbose > 0:\n bar.finish()\n print(\n 'Generated examples of shape:', examples.shape,\n '\\nGenerated labels of shape:', labels.shape,\n '\\nThere are {} classes: {}'.format(len(classes), classes)\n )\n \n assert not np.isnan(examples).any()\n return examples, labels", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\n files = self.source_data_files(data_dir, tmp_dir, dataset_split)\n vocab = _extract_vocab_data(files)\n\n # Determine the number of instances to generate\n if dataset_split == problem.DatasetSplit.TRAIN:\n num_instances = self.num_train_instances\n else:\n num_instances = self.num_eval_instances\n\n for _ in range(num_instances):\n instance_size = random.randint(self.min_size, self.max_size)\n tokens = random.choices(vocab, k=instance_size)\n instance = ''.join(tokens)\n yield {'inputs': instance, 'targets': instance}", "def build_datasets(self, data_dir: str = None, val_ratio: float = 0.2, num_train_examples: int = None,\n seed: int = 42, download: bool = True, **kwargs):\n if data_dir is None:\n data_dir = os.path.join(os.environ['DATA_DIR'], self.dataset_name)\n\n train_data = self.raw_dataset(data_dir, download=download, train=True, transform=self.train_transforms)\n val_data = self.raw_dataset(data_dir, download=download, train=True, transform=self.train_transforms)\n test_data = self.raw_dataset(data_dir, download=download, train=False, transform=self.test_transforms)\n\n # split train and validation\n train_indices, val_indices = get_split_indices(len(train_data), val_ratio, seed)\n if num_train_examples is not None:\n train_indices = np.random.choice(train_indices, num_train_examples, replace=False)\n train_data = Subset(train_data, train_indices)\n val_data = Subset(val_data, val_indices)\n\n # general way of returning extra information\n info = None\n\n # post-process datasets\n train_data, val_data, test_data, info = self.post_process_datasets(train_data, val_data, test_data, info=info)\n\n # name datasets and save statistics\n for dataset in [train_data, val_data, test_data]:\n dataset.dataset_name = self.dataset_name\n dataset.statistics = (self.means, self.stds)\n\n return train_data, val_data, test_data, info", "def generateCrossValidationSets(dataSets, shuffleSeed=42):\n\n\tembeddedCrossvalidationSets = []\n\tfor dataSet in dataSets:\n\n\t\tallFiles = getAllFiles([dataSet])\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\trandom.seed(shuffleSeed)\n\t\trandom.shuffle(allAroused)\n\t\trandom.shuffle(allNonAroused)\n\n\t\tfor outerIndex in range(0, 5):\n\t\t\tif len(embeddedCrossvalidationSets) <= outerIndex:\n\t\t\t\tembeddedCrossvalidationSets += [{\"outerValidate\": [], \"crossValidate\": []}]\n\n\t\t\touterSet = embeddedCrossvalidationSets[outerIndex]\n\n\t\t\touterAroused = allAroused[outerIndex::5]\n\t\t\touterNonAroused = allNonAroused[outerIndex::5]\n\n\t\t\touterAroused = outerAroused[:len(outerNonAroused)]\n\t\t\touterNonAroused = outerNonAroused[:len(outerAroused)]\n\n\t\t\touterValidateSet = outerAroused + outerNonAroused\n\t\t\trestAroused = list(filter(lambda x: x not in outerValidateSet, allAroused))\n\t\t\trestNonAroused = list(filter(lambda x: x not in outerValidateSet, allNonAroused))\n\n\t\t\tassert(len(list(filter(isAroused, outerValidateSet))) == len(outerValidateSet) / 2)\n\t\t\touterSet[\"outerValidate\"] += 
outerValidateSet\n\n\t\t\tfor innerIndex in range(0, 5):\n\t\t\t\tif len(outerSet[\"crossValidate\"]) <= innerIndex:\n\t\t\t\t\touterSet[\"crossValidate\"] += [{\"validate\": [], \"train\": []}]\n\n\t\t\t\tcrossValidationSet = outerSet[\"crossValidate\"][innerIndex]\n\n\t\t\t\tvalidatingAroused = restAroused[innerIndex::5]\n\t\t\t\tvalidatingNonAroused = restNonAroused[innerIndex::5]\n\n\t\t\t\tvalidatingAroused = validatingAroused[:len(validatingNonAroused)]\n\t\t\t\tvalidatingNonAroused = validatingNonAroused[:len(validatingAroused)]\n\n\t\t\t\tvalidatingSet = validatingAroused + validatingNonAroused\n\t\t\t\ttrainingSet = list(filter(lambda x: x not in validatingSet, restAroused)) + \\\n\t\t\t\t list(filter(lambda x: x not in validatingSet, restNonAroused))\n\n\t\t\t\tassert(len(list(filter(isAroused, validatingSet))) == len(validatingSet) / 2)\n\t\t\t\t#assert no validate files or testing files are train files\n\t\t\t\tassert(set(trainingSet) - set(validatingSet) == set(trainingSet))\n\t\t\t\tassert(set(trainingSet) - set(outerValidateSet) == set(trainingSet))\n\n\t\t\t\tcrossValidationSet[\"validate\"] += validatingSet\n\t\t\t\tcrossValidationSet[\"train\"] += trainingSet\n\n\treturn embeddedCrossvalidationSets", "def make_datasets(class_names, dataset_dict, path_source, path_dest, seed):\n \n create_directory_structure(path_dest)\n\n path_alldata = [path_source.joinpath(f'label_{class_}')\n for class_ in class_names]\n\n path_imagefiles = [class_path.glob('*.bin')\n for class_path in path_alldata]\n\n size = sum([v for k, v in dataset_dict.items()])\n rng = default_rng(seed)\n\n datasets_by_class = np.array([rng.choice([image_file.name\n for image_file in image_filelist],\n size=size, replace=False)\n for image_filelist in path_imagefiles])\n\n dataset_labels = np.array([np.full(size, class_)\n for class_ in class_names])\n\n if not path_dest.exists():\n path_dest.mkdir(parents=True)\n\n start=0\n for set_name, n_examples in dataset_dict.items():\n stop = start + n_examples\n\n filename = f'{set_name}_set.csv'\n path_file = path_dest.joinpath(filename)\n \n images = datasets_by_class[:,start:stop].flatten()\n labels = dataset_labels[:,start:stop].flatten()\n rows = np.transpose(np.vstack((images, labels))).tolist()\n\n with path_file.open(mode='w', newline='') as f:\n csv_writer = writer(f)\n csv_writer.writerows(rows)\n\n start = n_examples", "def createTrainTestSets():\n tweets = open(noDuplicatesFilename, 'r').read().splitlines()\n name_mapping = loadNameMapping()\n holdoutLocations = [u'Frederiksberg, Danmark', u'T\\xe5rnby, Danmark', u'Kolding, Danmark', u'T\\xe4by, Sverige', u'Kungsbacka, Sverige', u'Kristianstad, Sverige', u'Bod\\xf8, Norge', u'Kvinnherad, Norge', u'Ullensaker, Norge']\n testSetLocation = []\n rest = []\n for tweet in tweets:\n if stringToTweet(tweet).getFullName() in holdoutLocations:\n testSetLocation.append(tweet)\n else:\n rest.append(tweet)\n tweets = rest\n testIndex = int(round(len(tweets) * (1 - test_set_ratio)))\n random.seed(1)\n random.shuffle(tweets)\n trainSet = tweets[:testIndex]\n testSet = tweets[testIndex:]\n open(trainSetFilename, 'w').write('\\n'.join(trainSet))\n open(testSetNormalFilename, 'w').write('\\n'.join(testSet))\n open(testSetLocationFilename, 'w').write('\\n'.join(testSetLocation))\n print \"Wrote %d tweets to train set\" % len(trainSet)\n print \"Wrote %d tweets to normal test set\" % len(testSet)\n print \"Wrote %d tweets to location test set\" % len(testSetLocation)", "def generateTrainAndValidateset(trainSets, validateSets, 
validatePercentage=20):\n\tvalidateFiles = []\n\ttrainFiles = []\n\n\tfor validateSet in validateSets:\n\t\tif \".\" in validateSet:\n\t\t\tvalidateSet, percentage = validateSet.split(\".\")\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\tvalidateFiles += getAllFiles([validateSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = validatePercentage\n\n\t\tif validateSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid validate set: \" + validateSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[validateSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[validateSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\tvalidateFiles += allAroused[len(allAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\t\tvalidateFiles += allNonAroused[len(allNonAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\n\n\tfor trainSet in trainSets:\n\t\tif \".\" in trainSet:\n\t\t\ttrainSet, percentage = trainSet.split(\".\", 1)\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\ttrainFiles += getAllFiles([trainSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = 100 - validatePercentage\n\t\t\tvalidatePercentage = validatePercentage\n\n\t\tif trainSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid train set: \" + trainSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[trainSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[trainSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allNonAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\n\tif not any(map(lambda x: x.endswith(\".all\"), list(trainSets) + list(validateSets))):\n\t\t#assert no validatefiles are also trainfiles\n\t\tassert(set(trainFiles) - set(validateFiles) == set(trainFiles))\n\t\t#assert an equal amount of aroused and non-aroused validatefiles\n\t\tassert(len(list(filter(isAroused, validateFiles))) == len(validateFiles) / 2)\n\n\treturn trainFiles, validateFiles", "def split_train_and_test_with_py_datasets(data_set, batch_size=cfg['batch_size'], test_size=0.2, num_works=4,\n pin_memory=True):\n num_dataset = len(data_set)\n indices = list(range(num_dataset))\n split = int(np.floor(test_size * num_dataset))\n\n train_idx, test_idx = indices[split:], indices[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=train_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=test_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n return train_loader, 
test_loader", "def cross_validation_datasets(self, fold):\n if fold > len(self): fold = len(self) / 2\n stratified = self.stratified_bunches(fold)\n datasets = []\n for index in range(len(stratified)):\n gold = GoldInstances(training_as_gold(stratified[index]))\n rest = flatten(stratified[:index]) + flatten(stratified[index + 1:])\n training = TrainingInstances(rest)\n datasets.append((training, gold))\n return datasets", "def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]", "def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' 
% (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def dataset_difficulty():\n results = []\n datasets = [ data_2007, data_2012, data_indoor, data_easy ] \n \n for data in datasets:\n \n #Let the user know where we are\n print data\n X,Y = load_csv(data)\n \n # Training/testing split + LDA fit\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y)\n lda = LDA()\n lda.fit(X_train, Y_train)\n \n # Use linear SVC\n clf = svm.SVC(kernel=\"linear\")\n clf.fit(lda.transform(X_train), Y_train)\n \n # Predictions\n train_predict = clf.predict(lda.transform(X_train))\n test_predict = clf.predict(lda.transform(X_test))\n \n #Compute accuracy\n train_acc = 1.*sum(train_predict == Y_train)/len(train_predict)\n test_acc = 1.*sum(test_predict == Y_test)/len(test_predict)\n \n # Append results for that dataset\n results += [ [ data, train_acc, test_acc, clf, lda ] ]\n \n return results", "def get_train_batches(data_dir='/home/yunhan/batchified'):\n # todo: read in data that is preoprocessed\n # Use batch 1 - 52 as train (60%), 53 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def shuffle_and_split_data(X_genesets, y, train_size, validate_size):\n permutation = np.random.permutation(y.size)\n y_permuted = y[permutation]\n X_genesets_permuted = [Xg[permutation, :] for Xg in X_genesets]\n X_groups_train = [Xg[0:train_size, :] for Xg in X_genesets_permuted]\n X_groups_validate = [Xg[train_size: validate_size + train_size, :] for Xg in X_genesets_permuted]\n X_groups_test = [Xg[validate_size + train_size:, :] for Xg in X_genesets_permuted]\n y_train = y_permuted[0:train_size]\n y_validate = y_permuted[train_size: validate_size 
+ train_size]\n y_test = y_permuted[validate_size + train_size:]\n return X_groups_train, y_train, X_groups_validate, y_validate, X_groups_test, y_test", "def generate_combigen_datasets(\n n_samples_train=config.n_train,\n n_lines_train=config.n_lines,\n line_stats_train=config.line_stats,\n n_samples_val=config.n_val,\n n_lines_val=config.n_lines,\n line_stats_val=config.line_stats,\n n_samples_test=config.n_test,\n n_lines_test=config.n_lines,\n line_stats_test=config.line_stats,\n *args,\n **kwargs,\n ):\n # Training data\n x_train, y_train = generate_combigen_x_y_dataset(\n n_samples=n_samples_train,\n n_lines=n_lines_train,\n line_stats=line_stats_train,\n *args,\n **kwargs)\n\n # Validation data\n x_val, y_val = generate_combigen_x_y_dataset(\n n_samples=n_samples_val,\n n_lines=n_lines_val,\n line_stats=line_stats_val,\n *args,\n **kwargs)\n\n # Testing data\n x_test, y_test = generate_combigen_x_y_dataset(\n n_samples=n_samples_test,\n n_lines=n_lines_test,\n line_stats=line_stats_test,\n *args,\n **kwargs)\n\n # Return as a set of tuples that can also be unpacked into components\n return (x_train, y_train), (x_val, y_val), (x_test, y_test)", "def get_test_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def generate(self, labels, list_IDs, n_classes):\n # Infinite loop\n while 1:\n # Generate order of exploration of dataset\n indexes = self.__get_exploration_order(list_IDs)\n\n # Generate batches\n imax = int(len(indexes)/self.batch_size)\n for i in range(imax):\n # Find list of IDs\n list_IDs_temp = [list_IDs[k] for k in indexes[i*self.batch_size:(i+1)*self.batch_size]]\n print(\"Producing\")\n #print(list_IDs_temp)\n # Generate data\n X, y = self.__data_generation(labels, list_IDs_temp, n_classes)\n # print(X.shape)\n # print(y.shape)\n #print(\"Target Label\")\n #print(y)\n gc.collect()\n yield X, y", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def PrepareSets(args, tokenizer, train_set, dev_set, test_set, first_label=False):\n\n # filter out al instances where the emotion is neutral\n train_set = train_set.filter(lambda example: not 27 in example['labels'])\n dev_set = dev_set.filter(lambda example: not 27 in example['labels'])\n test_set = test_set.filter(lambda example: not 27 in example['labels'])\n\n # remove unnecessary columns\n train_set = train_set.remove_columns(['text', 'id'])\n dev_set = dev_set.remove_columns(['text', 'id'])\n test_set = 
test_set.remove_columns(['text', 'id'])\n\n # function that creates new instances for all labels\n def handle_multiple_labels(batch):\n new_batch = {'attention_mask': [],\n 'input_ids': [],\n 'labels': [],\n 'token_type_ids': [],\n }\n for instance_idx, instance in enumerate(batch['labels']):\n for label in instance:\n new_batch['attention_mask'].append(batch['attention_mask'][instance_idx])\n new_batch['input_ids'].append(batch['input_ids'][instance_idx])\n new_batch['labels'].append(label)\n new_batch['token_type_ids'].append(batch['token_type_ids'][instance_idx])\n return new_batch\n\n # function that takes the first label\n def handle_first_label(batch):\n batch['labels'] = batch['labels'][0]\n return batch\n\n # check which label function to use\n if first_label:\n label_fn = handle_first_label\n batched = False\n else:\n label_fn = handle_multiple_labels\n batched = True\n\n # filter the labels\n train_set = train_set.map(label_fn, batched=batched)\n dev_set = dev_set.map(label_fn, batched=batched)\n test_set = test_set.map(label_fn, batched=batched)\n\n # return the prepared datasets\n return train_set, dev_set, test_set", "def _sample_mini_dataset(dataset, num_classes, num_shots):\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n for sample in class_obj.sample(num_shots):\n yield (sample, class_idx)", "def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)", "def get_datasets(\n self, stage: str, num_samples_per_class: int = None\n ) -> \"OrderedDict[str, Dataset]\":\n num_samples_per_class = num_samples_per_class or 320\n\n datasets = super().get_datasets(stage=stage)\n datasets[\"train\"] = {\n \"dataset\": datasets[\"train\"],\n \"sampler\": BalanceClassSampler(\n labels=datasets[\"train\"].targets, mode=num_samples_per_class\n ),\n }\n return datasets", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def read_data_sets(data_path, fake_data=False, one_hot=False,\n validation_size=5000, source_url={},\n augment=False,\n percentage_train=100.,\n unbalance=False, unbalance_dict={\"percentage\": 20, \"label1\": 0, \"label2\": 8},\n ):\n\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n if fake_data:\n data_sets.train = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.validation = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.test = DataSet([], [], 
fake_data=True, one_hot=True)\n return data_sets\n\n if not source_url: # empty string check\n if 'fashion' in data_path:\n source_url = DEFAULT_SOURCE_URL_FASHION\n else:\n source_url = DEFAULT_SOURCE_URL_MNIST\n\n if 'fashion' in data_path or 'mnist' in data_path: # mnist or fashion\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_mnist(data_path, validation_size, source_url, one_hot)\n reshape = True\n else:\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_medical_data(data_path)\n reshape = False\n\n # add random permutation to train & validation\n np.random.seed(42)\n\n n_train = train_images.shape[0]\n perm = np.random.permutation(n_train)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n\n n_val = val_images.shape[0]\n perm = np.random.permutation(n_val)\n val_images = val_images[perm]\n val_labels = val_labels[perm]\n\n # For experiments with data-augmentation\n if augment:\n if 'fashion' in data_path: # rotations +-10 and horizontal flips\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=True)\n elif 'mnist' in data_path: # rotations +-10\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=False)\n train_images = np.concatenate([train_images, np.expand_dims(augmented_images, 3)])\n train_labels = np.concatenate([train_labels, augmented_labels])\n # for the medical datasets, you can use the \"augment\" argument while doing patch extraction\n\n # For experiments with limited amount of data\n if percentage_train != 100.:\n train_size = int(0.01*percentage_train*train_images.shape[0])\n Xtrain_images, Xval_images, ytrain, yval = train_test_split(train_images, train_labels, train_size=train_size)\n train_images = Xtrain_images\n train_labels = ytrain\n\n # For experiments with class-imbalance distribution\n if unbalance:\n n_classes = len(np.unique(np.argmax(train_labels, 1)))\n reduceto = 0.01*unbalance_dict['percentage']\n label1 = unbalance_dict['label1']\n label2 = unbalance_dict['label2']\n\n pick_ids = []\n newsize = 0\n all_classes = np.arange(0, n_classes)\n all_classes = np.delete(all_classes, np.where(all_classes == label1)[0])\n all_classes = np.delete(all_classes, np.where(all_classes == label2)[0])\n\n for lab in [label1, label2]:\n allids = np.where(np.argmax(train_labels, 1) == lab)[0]\n selectedids = np.random.choice(allids, int(reduceto * allids.shape[0]), replace=False)\n pick_ids.append(selectedids)\n newsize += len(selectedids)\n\n new_ids = convert_list_to_array(pick_ids, newsize)\n\n other_ids = []\n othersize = 0\n for lab in all_classes.tolist():\n selectedids = np.where(np.argmax(train_labels, 1) == lab)[0]\n other_ids.append(selectedids)\n othersize += len(selectedids)\n\n keep_ids = convert_list_to_array(other_ids, othersize)\n\n # new_ids: contains the indices of the reduced (imbalance) classes\n # keep_ids: contains the indices of the rest (keep the same class distribution)\n resulting_ids = np.concatenate((new_ids, keep_ids))\n np.random.shuffle(resulting_ids)\n\n train_images = train_images[resulting_ids, ...]\n train_labels = train_labels[resulting_ids, ...]\n\n data_sets.train = DataSet(train_images, train_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.validation = DataSet(val_images, val_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.test = DataSet(test_images, test_labels, fake_data=True, one_hot=True, reshape=reshape)\n\n return 
data_sets", "def get_datasets(sim_args):\n if len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'all':\n data_tags = [\n 'Webscope_C14_Set1',\n 'Webscope_C14_Set2',\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'CIKM2017':\n data_tags = [\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'letor64':\n data_tags = [\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n ]\n # random.shuffle(data_tags)\n else:\n data_tags = sim_args.data_folders\n for data_tag in data_tags:\n assert data_tag in DATASET_COLLECTION, 'Command line input is currently not supported.'\n yield DATASET_COLLECTION[data_tag]", "def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr", "def splitting_to_datasets(entities, training_percent, testing_percent, num_epochs,\n path=VisualGenome_DATASETS_PICKLES_PATH, config=None):\n\n # Load datasets from cache\n if config is not None and config.use_cache_dir:\n train_dataset_path = os.path.join(config.loading_model_folder, TRAIN_DATA_SET)\n test_dataset_path = os.path.join(config.loading_model_folder, TEST_DATA_SET)\n validation_dataset_path = os.path.join(config.loading_model_folder, VALIDATION_DATA_SET)\n print(\"Loading cached data-sets: training-{0}, testing-{1} and valiation-{2}\".format(train_dataset_path,\n test_dataset_path,\n validation_dataset_path))\n train_imgs = cPickle.load(open(train_dataset_path, 'rb'))\n test_imgs = cPickle.load(open(test_dataset_path, 'rb'))\n val_imgs = cPickle.load(open(validation_dataset_path, 'rb'))\n\n print(\"Debug printing- the number of train samples: {0}, the number of test samples: {1}, \"\n \"the number of validation samples: {2}\".format(len(train_imgs), len(test_imgs), len(val_imgs)))\n\n return train_imgs, test_imgs, val_imgs\n\n number_of_samples = len(entities)\n train_size = int(number_of_samples * training_percent)\n test_size = int(number_of_samples * testing_percent)\n validation_size = number_of_samples - (train_size + test_size)\n\n if not train_size + test_size + validation_size == number_of_samples:\n error_msg = 'Data size of (train + test + validation) is {0} and should be number of labels: {1}'.format(\n train_size + test_size + validation_size, number_of_samples)\n print(error_msg)\n raise Exception(error_msg)\n\n # Create a numpy array of indices of the data\n indices = np.arange(len(entities))\n # Shuffle the indices of the data\n random.shuffle(indices)\n\n # Get the train + test + val dataset\n train_imgs = entities[indices[:train_size]]\n test_imgs = entities[indices[train_size:train_size + test_size]]\n val_imgs = entities[indices[train_size + test_size:]]\n\n # Take the round number of each dataset per the number of epochs\n # num_of_samples_per_train_updated = len(train_imgs) / num_epochs * num_epochs\n # train_imgs = train_imgs[:num_of_samples_per_train_updated]\n # num_of_samples_per_test_updated = len(test_imgs) / num_epochs * num_epochs\n # test_imgs = test_imgs[:num_of_samples_per_test_updated]\n # num_of_samples_per_val_updated = 
number_of_samples - num_of_samples_per_train_updated - num_of_samples_per_test_updated\n # val_imgs = val_imgs[:num_of_samples_per_val_updated]\n\n # print(\"Debug printing- the number of train samples: {0}, the number of test samples: {1}, \"\n # \"the number of validation samples: {2}\".format(num_of_samples_per_train_updated,\n # num_of_samples_per_test_updated,\n # num_of_samples_per_val_updated))\n\n # Save train-set and test-set and validation-set\n pickle_dataset(train_imgs, test_imgs, val_imgs, path)\n return train_imgs, test_imgs, val_imgs", "def read_and_split_sets():\n gen_train_test_sets(\"Data_Sent_Embds/en_sent.pkl\", \"Data_Sent_Embd_Splitted/en_train.pkl\",\n \"Data_Sent_Embd_Splitted/en_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/es_sent.pkl\", \"Data_Sent_Embd_Splitted/es_train.pkl\",\n \"Data_Sent_Embd_Splitted/es_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/pr_sent.pkl\", \"Data_Sent_Embd_Splitted/pr_train.pkl\",\n \"Data_Sent_Embd_Splitted/pr_test.pkl\")", "def rand_checkers(n1=100, n2=100, n3=100, n4=100, sigma=0.1):\n nb1 = n1 // 8\n nb2 = n2 // 8\n nb3 = n3 // 8\n nb4 = n4 // 8\n\n xapp = np.reshape(np.zeros((nb1 + nb2 + nb3 + nb4) * 16),\n [(nb1 + nb2 + nb3 + nb4) * 8, 2])\n yapp = np.ones((nb1 + nb2 + nb3 + nb4) * 8)\n idx = 0\n nb = 2 * nb1\n for i in range(-2, 2):\n for j in range(-2, 2):\n yapp[idx:(idx + nb)] = [fmod(i - j + 100, 4)] * nb\n xapp[idx:(idx + nb), 0] = np.random.rand(nb)\n xapp[idx:(idx + nb), 0] += i + sigma * np.random.randn(nb)\n xapp[idx:(idx + nb), 1] = np.random.rand(nb)\n xapp[idx:(idx + nb), 1] += j + sigma * np.random.randn(nb)\n idx += nb\n\n ind = np.arange((nb1 + nb2 + nb3 + nb4) * 8)\n np.random.shuffle(ind)\n res = np.hstack([xapp, yapp[:, np.newaxis]])\n return np.array(res[ind, :])", "def fit(self, X_train, y_train):\n for i in range(self.N):\n h = RandomDecisionTree(candidate_splits=self.candidate_splits, depth=self.max_depth)\n h = h.fit(*self.bootstrap(X_train, y_train))\n self.learners.append(h)", "def get_dataset(test_envs, args, hparams, algorithm_class=None):\n is_mnist = \"MNIST\" in args.dataset\n dataset = vars(datasets)[args.dataset](args.data_dir, test_envs)\n # if not isinstance(dataset, MultipleEnvironmentImageFolder):\n # raise ValueError(\"SMALL image datasets are not implemented (corrupted), for transform.\")\n\n in_splits = []\n out_splits = []\n for env_i, env in enumerate(dataset):\n # The split only depends on seed_hash (= trial_seed).\n # It means that the split is always identical only if use same trial_seed,\n # independent to run the code where, when, or how many times.\n out, in_ = split_dataset(\n env,\n int(len(env) * args.holdout_fraction),\n misc.seed_hash(args.trial_seed, env_i),\n )\n if env_i in test_envs:\n in_type = \"test\"\n out_type = \"test\"\n else:\n in_type = \"train\"\n out_type = \"valid\"\n\n if is_mnist:\n in_type = \"mnist\"\n out_type = \"mnist\"\n\n set_transfroms(in_, in_type, hparams, algorithm_class)\n set_transfroms(out, out_type, hparams, algorithm_class)\n\n if hparams[\"class_balanced\"]:\n in_weights = misc.make_weights_for_balanced_classes(in_)\n out_weights = misc.make_weights_for_balanced_classes(out)\n else:\n in_weights, out_weights = None, None\n in_splits.append((in_, in_weights))\n out_splits.append((out, out_weights))\n\n return dataset, in_splits, out_splits", "def test_trainGenerator():\n\n # check type\n assert isinstance(trainset, surprise.trainset.Trainset)\n\n # the number of users in trainset should be equal to the user from database plus 1\n 
assert len(trainset.all_users()) == len(svd.song_df.user_id.unique())+1", "def get_train_test_lists(dataset_path, classes=('glare_small', 'normal'), test_size=0.25):\n image_set = []\n label_set = []\n for cls in classes:\n dir = os.path.join(dataset_path, cls)\n img_list = glob.glob(dir + '/*.png')\n img_list.extend(glob.glob(dir + '/*.jpg'))\n label = None\n if cls == 'glare_small' or cls == 'glare':\n label = 1\n if cls == 'normal':\n label = 0\n\n labels = list(itertools.repeat(label, len(img_list)))\n image_set.extend(img_list)\n label_set.extend(labels)\n X_train, X_test, y_train, y_test = train_test_split(image_set, label_set, test_size=test_size, random_state=42)\n return X_train, X_test, y_train, y_test", "def get_generators_from_ds(dataset):\n data_train = tf.data.Dataset.from_tensor_slices(\n (dataset['x_train'], dataset['y_train']))\n data_test = tf.data.Dataset.from_tensor_slices(\n (dataset['x_test'], dataset['y_test']))\n\n return data_train, data_test", "def generate_dataset(self):\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n\n return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()\n else: # only need shadow dataset for testing\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n\n return dataA_iter, dataA_iter.get_next()", "def gen_testsets(feats, sparse, non_neg, kind='bicluster', **kwargs):\n\n if kind == 'bicluster':\n generator = make_biclusters\n elif kind == 'checkerboard':\n generator = make_checkerboard\n else:\n raise ValueError('Invalid generator function: `{}`'.format(kind))\n\n datasets, rows, columns = {}, {}, {}\n for key_num, key in enumerate(feats.index):\n datasets[key], rows[key], columns[key] = gen_testdata(\n generator,\n feats.loc[key, :],\n sparse=sparse[key_num], non_neg=non_neg[key_num],\n **kwargs\n )\n\n return datasets, rows, columns", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def _generate_data(self, x_data, y_data, max_seq_len, digits, seq_len,\n n_samples, use_one_hot, class_partition,\n upsample_control):\n # modify seq_len in case we do upsampling control\n if upsample_control:\n upsample_factor = seq_len\n seq_len = 1\n if not self.two_class:\n raise NotImplementedError()\n\n # construct all possible classes\n classes = [\"\".join(seq) for seq in \\\n itertools.product(\"01\", repeat=seq_len)]\n\n # get the right number of samples per class to get a balanced data set\n # with the desired n_samples.\n num = n_samples\n div = len(classes)\n n_samples_per_class = [num // div + (1 if x < num % div else 0) \\\n for x in range (div)]\n\n # find indices of samples with the wanted digit class\n y_data = [np.argmax(y) for y in y_data]\n digit_idx = []\n digit_idx.append(np.where(np.asarray(y_data) == digits[0])[0])\n digit_idx.append(np.where(np.asarray(y_data) == digits[1])[0])\n\n # generate samples for every 
class\n samples = []\n labels = []\n for i,c in enumerate(classes):\n this_label = i\n digits_to_sample = [int(c[i]) for i in range(len(c))]\n for s in range(n_samples_per_class[i]):\n this_sample = None\n for d in digits_to_sample:\n rand_idx = self._rstate.randint(len(digit_idx[d]))\n sample_idx = digit_idx[d][rand_idx]\n digit_sample = x_data[sample_idx]\n if this_sample is None:\n this_sample = digit_sample\n else:\n this_sample = np.vstack((this_sample,digit_sample)) \n samples.append(this_sample)\n labels.append(this_label)\n\n # if configured sort labels into 2 classes\n labels = np.asarray(labels)\n if self.two_class and not upsample_control:\n lbl_mask = np.isin(labels, class_partition)\n labels[~lbl_mask] = 0\n labels[lbl_mask] = 1\n\n if upsample_control:\n for i,s in enumerate(samples):\n # Initial timestep is absolute start position of digit. To\n # translate to a higher resolution image, we can just multiply\n # the abolute position vby the scaling factor.\n upsample = s[0,:]*upsample_factor\n for t in np.arange(1,s.shape[0]):\n # don't do upsampling at end of strokes or end of digits\n if all((s[t,2] == 0, s[t,3] == 0)):\n # Repeat original stroke \"upsample_factor\" times, such\n # that the relative stroke length is identical if\n # images are normalized to same resolution.\n for k in range(upsample_factor):\n upsample = np.vstack((upsample, s[t,:]))\n else:\n upsample = np.vstack((upsample, s[t,:]))\n samples[i] = upsample\n\n # structure output data\n out_data = labels.reshape(-1, 1)\n if use_one_hot:\n n_classes = 2**seq_len\n if self.two_class:\n n_classes = 2\n\n # FIXME We shouldn't call this method if the validation set size is\n # zero.\n if out_data.size == 0:\n out_data = np.matlib.repmat(out_data, 1, n_classes)\n else:\n # FIXME use internal method `_to_one_hot` and set required class\n # attributes beforehand.\n one_hot_encoder = OneHotEncoder(categories=[range(n_classes)])\n one_hot_encoder.fit(npm.repmat(np.arange(n_classes), 1, 1).T)\n out_data = one_hot_encoder.transform(out_data).toarray()\n\n if self.target_per_timestep:\n out_data = np.matlib.repmat(np.asarray(out_data), 1, max_seq_len)\n\n # structure input data\n in_data = np.zeros((n_samples,max_seq_len,4))\n sample_lengths = np.zeros(n_samples)\n for i,s in enumerate(samples):\n in_data[i,:s.shape[0],:] = s\n sample_lengths[i] = s.shape[0]\n\n in_data = self._flatten_array(in_data)\n\n return in_data, out_data, sample_lengths", "def get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def create_samples(descriptions, candidates_lists, labels):\n samples = []\n for description, candidate_list, label in tqdm(\n zip(descriptions, candidates_lists, labels)\n ):\n negative_examples = [\n candidate for candidate in candidate_list if candidate != label\n ]\n negative_examples = random.choices(negative_examples, k=4)\n if label in candidate_list:\n positive_example = [\n candidate for candidate in candidate_list if candidate == label\n ][0]\n else:\n positive_example = label\n samples.append(InputExample(texts=[description, positive_example], label=1))\n for neg_ex in negative_examples:\n samples.append(InputExample(texts=[description, neg_ex], label=0))\n return samples", "def 
init_benchmark_data(\n num_inputs, input_size, num_classes, rand_seed=None,\n **kwargs\n):\n N, D, C = num_inputs, input_size, num_classes\n\n rs = np.random.RandomState(seed=rand_seed)\n X = rs.rand(N, D)\n y = rs.choice(C, size=N)\n return X, y", "def create_data_generators(shuffle=True, novelty_type='normal', item_to_include='None',\n scale_level=1):\n\n total_noi_i = 10 # Number of processed images from one environemnt i\n noe = 1 # Numer of environments\n n_p = 32 # Patch size, patch --> n_p x n_p\n\n novelty = novelty_type\n datasets = []\n\n for i in range(noe):\n\n # Load only images of the environment which includes images of the stated novel item.\n if item_to_include is not None and novelty == 'novel_item':\n dataset_env_i = PolycraftDatasetWithSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level,\n item_name=item_to_include)\n datasets.append(dataset_env_i)\n # We only process the one environment with the item (maybe change this\n # if we have more than one environement per novel_item!?)\n break\n\n # No specific item given which should be included.\n else:\n dataset_env_i = PolycraftDatasetNoSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level)\n datasets.append(dataset_env_i)\n\n final_dataset = ConcatDataset(datasets)\n\n total_noi = len(final_dataset) # Total number of processed images from all datasets\n\n if(total_noi < 7):\n print('Number of samples too small for splitting dataset in training-/valid-/test set.')\n\n train_noi = int(0.7 * total_noi) # Number of images used for training (70 %)\n valid_noi = int(0.15 * total_noi) # Number of images used for validation (15 %)\n test_noi = total_noi - train_noi - valid_noi # Number of images used for testing (15 %)\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(\n final_dataset, [train_noi, valid_noi, test_noi])\n\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True)\n\n return train_loader, valid_loader, test_loader", "def train_valid_test_datasets_provider_bert():\n args = get_args()\n\n print_rank_0('> building train, validation, and test datasets '\n 'for BERT ...')\n from megatron.data.dataset_utils import build_train_valid_test_datasets\n train_ds, valid_ds, test_ds = build_train_valid_test_datasets(\n data_prefix=args.data_path,\n data_impl=args.data_impl,\n splits_string=args.split,\n train_valid_test_num_samples=[1,1,1], # Just dummy numbers since we assume args.train_data_exact_num_epochs will override them\n max_seq_length=args.seq_length,\n masked_lm_prob=args.mask_prob,\n short_seq_prob=args.short_seq_prob,\n seed=args.seed,\n skip_warmup=(not args.mmap_warmup),\n binary_head=args.bert_binary_head)\n print_rank_0(\"> finished creating BERT datasets ...\")\n\n return train_ds, valid_ds, test_ds", "def main(unused_argv):\n del unused_argv\n if not os.path.exists(FLAGS.data_dir):\n os.makedirs(FLAGS.data_dir)\n\n tfds_cached_dict = {}\n data_dir = FLAGS.tfds_data_dir if FLAGS.tfds_data_dir else None\n name = FLAGS.dataset_name\n tfds_cached_dict[name] = tfds.load(name, batch_size=-1, data_dir=data_dir)\n dataset_dict = tfds_cached_dict[name]\n dataset_dict[tfds.Split.TRAIN] = tfds.as_numpy(\n dataset_dict[tfds.Split.TRAIN])\n dataset_dict[tfds.Split.TEST] = tfds.as_numpy(\n dataset_dict[tfds.Split.TEST])\n # To mock the API of tfds.load to 
cache the downloaded datasets.\n # Used as an argument to `get_dataset`.\n def load_fn(name, data_dir=None, batch_size=-1):\n # This function will always return the whole dataset.\n assert batch_size == -1\n del data_dir\n del batch_size\n return tfds_cached_dict[name]\n class_ids = sorted([int(x) for x in FLAGS.class_ids])\n num_classes = len(class_ids)\n for i in range(num_classes):\n for j in range(i+1, num_classes):\n print('Generating pos {} neg {}'.format(i, j))\n positive_class = class_ids[i]\n negative_class = class_ids[j]\n random_seeds = range(FLAGS.min_data_seed, FLAGS.max_data_seed)\n for seed in random_seeds:\n dataset = create_projected_binary_dataset(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.num_train_examples, FLAGS.num_valid_examples,\n FLAGS.num_test_examples, FLAGS.projected_dim, seed, load_fn)\n filename = 'binary_{}-pos_{}-neg_{}-dim_{}-seed_{}'.format(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.projected_dim, seed)\n serialized_dataset = dataset.SerializeToString()\n\n with open(os.path.join(FLAGS.data_dir, filename), 'wb') as f:\n f.write(serialized_dataset)", "def generate_data(self,seed):\n X, y = make_classification( n_samples = 250, random_state = seed )\n # Add bias term\n X = np.concatenate( ( np.ones( ( 250, 1 ) ), X ), axis = 1 )\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( \n X, y, test_size = 50, random_state = seed )", "def split_dataset(X, Y, train_size=0.8):\n if train_size != 1.0:\n return train_test_split(\n X, Y,\n train_size=train_size,\n stratify=Y\n )\n else:\n X_, Y_ = shuffle(\n X, Y\n )\n return X_, [], Y_, []", "def default_generator(self,\n dataset,\n epochs=1,\n predict=False,\n deterministic=True,\n pad_batches=True):\n for epoch in range(epochs):\n if not predict:\n print('Starting epoch %i' % epoch)\n for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(\n batch_size=self.batch_size,\n deterministic=deterministic,\n pad_batches=pad_batches):\n\n feed_dict = dict()\n if y_b is not None and not predict:\n for index, label in enumerate(self.labels_fd):\n if self.mode == \"classification\":\n feed_dict[label] = to_one_hot(y_b[:, index])\n if self.mode == \"regression\":\n feed_dict[label] = y_b[:, index:index + 1]\n if w_b is not None:\n feed_dict[self.weights] = w_b\n # Transform SMILES string to integer vectors\n smiles_seqs = [self.smiles_to_seq(smiles) for smiles in ids_b]\n feed_dict[self.smiles_seqs] = np.stack(smiles_seqs, axis=0)\n yield feed_dict", "def _train_pipeline(self, ds_images, ds_labels):\n train_count = self.n_examples - self.val_count\n steps_per_epoch = int(train_count // self.batch_size)\n repeat_count = self.n_epochs * steps_per_epoch\n\n ds_zip = tf.data.Dataset.zip((ds_images, ds_labels))\n ds = (ds_zip.shuffle(train_count, seed=self.seed+10,\n reshuffle_each_iteration=True)\n .repeat(count=repeat_count)\n .batch(self.batch_size)\n .prefetch(3))\n\n return ds", "def get_datasets_and_generator(args, no_target=False):\n # Define datasets.\n uniform_dataset = datasets.UniformRVDataset(num_samples=args.num_samples,\n shape=args.in_shape)\n uniform_dataloader = torch.utils.data.DataLoader(uniform_dataset,\n batch_size=args.batch_size)\n # Define generator model (simple fully-connected with ReLUs).\n generator = torch.nn.Sequential(\n torch.nn.Linear(args.in_shape, 5),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(5, 5),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(5, 5),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(5, args.out_shape),\n # torch.nn.Tanh()\n )\n if 
no_target:\n return uniform_dataloader, generator\n else:\n normal_dataset = datasets.NormalRVDataset(num_samples=args.num_samples,\n shape=args.out_shape,\n static_sample=not args.dynamic_sample)\n normal_dataloader = torch.utils.data.DataLoader(normal_dataset,\n batch_size=args.batch_size)\n return uniform_dataloader, normal_dataloader, generator", "def split_train_datasets(self):\n\n available_alphabets = list(self.train_dictionary.keys())\n number_of_alphabets = len(available_alphabets)\n\n train_indexes = random.sample(\n range(0, number_of_alphabets - 1), int(0.8 * number_of_alphabets))\n\n # If we sort the indexes in reverse order we can pop them from the list\n # and don't care because the indexes do not change\n train_indexes.sort(reverse=True)\n\n for index in train_indexes:\n self._train_alphabets.append(available_alphabets[index])\n available_alphabets.pop(index)\n\n # The remaining alphabets are saved for validation\n self._validation_alphabets = available_alphabets\n self._evaluation_alphabets = list(self.evaluation_dictionary.keys())", "def split_dataset(dset, batch_size=128, thread_count=4):\n sampler_dset_train = data.sampler.SubsetRandomSampler(list(range(int(0.7*len(dset)))))\n sampler_dset_test = data.sampler.SubsetRandomSampler(list(range(int(0.7*len(dset)),\n int(0.85*len(dset)))))\n sampler_dset_validation = data.sampler.SubsetRandomSampler(list(range(int(0.85*len(dset)),\n len(dset))))\n\n loader_dset_train = data.DataLoader(\n dset, batch_size=batch_size, num_workers=thread_count,\n pin_memory=True, sampler=sampler_dset_train)\n loader_dset_test = data.DataLoader(\n dset, batch_size=batch_size, num_workers=thread_count,\n pin_memory=True, sampler=sampler_dset_test)\n loader_dset_validation = data.DataLoader(\n dset, batch_size=batch_size, num_workers=thread_count,\n pin_memory=True, sampler=sampler_dset_validation)\n\n return loader_dset_train, loader_dset_test, loader_dset_validation", "def train_dts_independence(observations,targets,method='bagging'):\n n_targets = len(targets[0])\n # we need to know the number of individuals n such that n*(n-1)/2 = n_targets\n # we do that iteratively\n n_individuals = get_number_of_individuals(n_targets)\n\n tars = np.array(targets)\n\n # we want to use all training data available for only one classifier, so we create data from all pairs\n training_input = []\n training_output = []\n for i in range(n_individuals-1):\n for j in range(i+1,n_individuals):\n # basically, for each pair\n # we get individual values AND pairwise values\n i_general = observations[i]['general']\n j_general = observations[j]['general']\n i_senses_j = observations[i][j]\n j_senses_i = observations[j][i]\n for timestamp in range(len(i_general)):\n # THE ORDER IS EXTREMELY IMPORTANT (to be consistent)\n training_input.append(i_general[timestamp] + j_general[timestamp] + i_senses_j[timestamp] + j_senses_i[timestamp])\n\n # also, we add the output, ordered\n pair_targets = tars[:,get_edge_index((i,j),n_individuals)].tolist()\n training_output += pair_targets\n\n # now we simply train\n\n dt = None\n if method == 'bagging': dt = BaggingClassifier(tree.DecisionTreeClassifier(),n_estimators=100,max_samples=0.5, max_features=1.)\n elif method == 'random_forest': dt = RandomForestClassifier(n_estimators=100)\n elif method == 'boosting': dt = AdaBoostClassifier(n_estimators=100)\n else: dt = tree.DecisionTreeClassifier()\n # the dt cannot be trained if the outputs are all equal. 
In that case, we create a fake dt\n if len(set(training_output)) > 1:\n # We want to have a balanced data set while training.\n bal_observations, bal_tar = sample_balanced_dataset(training_input,training_output) #from data_manipulation\n dt.fit(bal_observations,bal_tar)\n else:\n dt = FakeClassifier(training_output[0])\n return dt", "def maybe_generate_data(data_dir,\n shape=None,\n num_examples=None,\n stone_probability=0.45,\n num_files=2):\n dest_dir = os.path.join(data_dir, \"batches-bin\")\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n # Log hook to measure progress\n # TODO: not in use\n def _progress(count, block_size, total_size):\n sys.stdout.write(\"\\r>> Generating %s %.1f%%\" % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n # generate training batches\n # constrained\n filenames = [\"data_batch_%d.bin\" % i for i in range(num_files)]\n for filename in filenames:\n filepath = os.path.join(dest_dir, filename)\n if not os.path.exists(filepath):\n print(\"%s not found - generating...\" % filename)\n x, y = generate_constrained_dataset(_progress, **{\n \"num_examples\": num_examples or NUM_EXAMPLES,\n \"stone_probability\": stone_probability,\n \"shape\": shape})\n _convert_to_tfrecords(x, shape, y, filepath)\n print()\n statinfo = os.stat(filepath)\n print(\"Successfully generated\", filename,\n statinfo.st_size, \"bytes.\")\n\n # generate testing batches\n # random\n # TODO: generate random dataset\n filenames = [\"test_batch_%d.bin\" % i for i in range(num_files)]\n for filename in filenames:\n filepath = os.path.join(dest_dir, filename)\n if not os.path.exists(filepath):\n print(\"%s not found - generating...\" % filename)\n # utils.generate_dataset(filepath, _progress, **{\n x, y = generate_constrained_dataset(_progress, **{\n \"num_examples\": num_examples or NUM_EXAMPLES,\n \"stone_probability\": stone_probability,\n \"shape\": shape})\n _convert_to_tfrecords(x, shape, y, filepath)\n print()\n statinfo = os.stat(filepath)\n print(\"Successfully generated\", filename,\n statinfo.st_size, \"bytes.\")", "def create_train_test_sets(self,x,y,lenTest):\n \n nbInd = x.shape[0]\n shuffler = np.random.permutation(nbInd)\n x_train = x[shuffler][0:(nbInd-lenTest),]\n y_train = y[shuffler][0:(nbInd-lenTest),]\n\n x_test = x[shuffler][(nbInd-lenTest):nbInd,]\n y_test = y[shuffler][(nbInd-lenTest):nbInd,]\n\n return x_train,y_train,x_test,y_test", "def split_data(train_split, src_dir, train_dir, test_dir, classes):\n for cls in classes:\n # get all dat files of this class\n data = get_instances_of_class(cls, src_dir)\n \n # how many of the data points are for training?\n train_count = round(len(data) * train_split / 100)\n \n # randomly choose indexes\n train_indexes = set()\n while len(train_indexes) < train_count:\n train_indexes.add(random.randrange(len(data)))\n \n # move all train_indexes to train_dir, others to test_dir\n COPY = lambda src, dst, filename:\\\n shutil.copy2(\n \"{}/{}\".format(src, data[i]),\n \"{}/{}\".format(dst, data[i])\n )\n \n for i in range(len(data)):\n if i in train_indexes:\n COPY(src_dir, train_dir, data[i])\n else:\n COPY(src_dir, test_dir, data[i])", "def generate_dataset(self):\n sets = {\n \"train\": 10,\n \"test\": 5,\n }\n\n fields = {\n \"strings_list\": lambda x: str_to_ascii(self.generate_string_list(x)),\n \"data\": lambda x: np.random.randint(0, 10, (x, 10)),\n \"number\": lambda x: np.array(range(x)),\n \"field_with_a_long_name_for_printing\": lambda x: np.array(range(x)),\n }\n\n 
lists = {\n \"list_dummy_data\": np.array(range(10)),\n \"list_dummy_number\": np.array(range(10), dtype=np.uint8),\n }\n\n dataset = {}\n data_fields = {}\n for set_name in sets:\n dataset[set_name] = self.populate_set(sets[set_name], fields, lists)\n data_fields[set_name] = sorted(dataset[set_name].keys())\n\n return dataset, data_fields", "def generate_inputs_files(dataset_name='mit67', graph=None, one_hot_labels_list=None, bottlenecks=None):\n graph.write_graphmlz(join(DATA_DIR, 'graph.net'))\n indices = [i for i in range(len(one_hot_labels_list))]\n\n y = []\n for one_hot in one_hot_labels_list:\n y.append(one_hot_to_label(one_hot=one_hot))\n\n X = []\n for k, i in enumerate(indices):\n # compare_labels(bottleneck_file=bottlenecks[i], other=y[k])\n # Example: 'Home*winecellar*wine_storage_42_02_altavista.jpg.txt\n if not y[k] == bottlenecks[i].split('*')[0]:\n raise Exception('Feature representation not matching with one-hot representation')\n\n filename = bottlenecks[i].split('*')[1] + '*' +bottlenecks[i].split('*')[2]\n bottlenecks_values = load_bottleneck_values(bottlenecskpath=BOTTLENECK_PATH,\n bottleneck_file=filename)\n for values in bottlenecks_values:\n X.append(values)\n\n allx, tx, ally, ty, allx_indices, X_test_indices = train_test_split(X, y, indices, stratify=y,\n test_size=TESTING_PERCENTAGE)\n\n ally = [global_class_to_one_hot(global_class=ally_) for ally_ in ally]\n labels = graph.vs['label']\n verify_labels_order(graph_labels=labels, y_test_labels=ty, x_test_indices=X_test_indices)\n ty = [global_class_to_one_hot(global_class=ty_) for ty_ in ty]\n\n allx_indices = [i for i in range(len(allx_indices))]\n # x e y are samples with labels from training data\n # x_ e y_ are samples with no labels from training data\n x_, x, y_, y, x_train_indices, x_test_indices = train_test_split(allx, ally, allx_indices, stratify=ally,\n test_size=TESTING_PERCENTAGE)\n x = sparse.csr_matrix(x)\n tx = sparse.csr_matrix(tx)\n allx = sparse.csr_matrix(allx)\n y = np.array(y)\n ty = np.array(ty)\n ally = np.array(ally)\n\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.x'), object_=x)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.tx'), object_=tx)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.allx'), object_=allx)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.y'), object_=y)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.ty'), object_=ty)\n save_object(file_name=join(DATA_DIR, 'ind.' + dataset_name + '.ally'), object_=ally)\n save_object(file_name=join(DATA_DIR, 'ind.' 
+ dataset_name + '.test.index'), object_=X_test_indices)\n save_graph_as_dict(graph=graph)", "def batch_generator(labels_df, set_kind):\n # Generate training batches\n if set_kind == \"train\" and (labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 64):\n while 1:\n\n for i in range(labels_df.shape[0]//8):\n x_train = np.load('data/train-npy/npdatasetX{}.npy'.format(i))\n y_train = np.load('data/train-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1):\n x_trainj = x_train[j*8:j*8-1,:]\n y_trainj = y_train[j*8:j*8-1,:]\n\n yield (x_trainj, y_trainj)\n\n\n # Generate validation batches\n if set_kind == \"valid\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//4): \n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n y_valid = np.load('data/valid-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1): \n x_validj = x_valid[j*4:j*4-1,:]\n y_validj = y_valid[j*4:j*4-1,:]\n\n yield (x_validj, y_validj)\n\n\n # Generate test batches\n if set_kind == \"test\" and labels_df.shape[0] == 40669:\n while 1:\n\n for i in range(labels_df.shape[0]//4): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(1): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n \n yield (x_validj, y_validj)\n\n if set_kind == \"test\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//8): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(2): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n\n yield x_validj", "def package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))", "def get_train_test_gens(\n *data_dicts: DataDict, splitter=ShuffleSplit, splitter_opts, **genbatchopts\n):\n\n if isinstance(splitter, type):\n splitter = splitter(**splitter_opts)\n else:\n if splitter_opts:\n warn(\n \"Passed splitter_opts, but splitter is an existing object.\"\n \"They will be ignored.\"\n )\n\n index_arrays = splitter.split()\n\n out = []\n for ixes in index_arrays:\n out.append(\n generate_batches(*data_dicts, bound_ixes=ixes, **genbatchopts)\n )\n\n return tuple(out)", "def build_toy_dataset(N):\n y_data = np.random.uniform(-10.5, 10.5, N)\n r_data = np.random.normal(size=N) # random noise\n x_data = np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + r_data * 1.0\n x_data = x_data.reshape((N, 1))\n return train_test_split(x_data, y_data, random_state=42)", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = 
os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering 
parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def get_datasets(business_data_file, enter_data_file, politics_data_file, sport_data_file, tech_data_file):\n # Load data from files\n business_examples = list(open(business_data_file, \"r\").readlines())\n business_examples = [s.strip() for s in business_examples]\n enter_examples = list(open(enter_data_file, \"r\").readlines())\n enter_examples = [s.strip() for s in enter_examples]\n politics_examples = list(open(politics_data_file, \"r\").readlines())\n politics_examples = [s.strip() for s in politics_examples]\n sport_examples = list(open(sport_data_file, \"r\").readlines())\n sport_examples = [s.strip() for s in sport_examples]\n tech_examples = list(open(tech_data_file, \"r\").readlines())\n tech_examples = [s.strip() for s in tech_examples]\n\n datasets = dict()\n datasets['data'] = business_examples + enter_examples + politics_examples + sport_examples + tech_examples\n target = [0 for x in business_examples] + [1 for x in enter_examples] + [2 for x in politics_examples] + [3 for x in sport_examples] + [4 for x in tech_examples]\n datasets['target'] = target\n datasets['target_names'] = ['business_examples', 'enter_examples', 'politics_examples', 'sport_examples', 'tech_examples']\n return datasets", "def load_datasets(args, train_test_split=0):\n logger.info(\"Loading data...\")\n df_data_path = \"./data/df_data.pkl\"\n graph_path = \"./data/text_graph.pkl\"\n if not os.path.isfile(df_data_path) or not os.path.isfile(graph_path):\n logger.info(\"Building datasets and graph from raw data... 
Note this will take quite a while...\")\n generate_text_graph(args.train_data, args.infer_data, args.max_vocab_len)\n df_data = load_pickle(\"df_data.pkl\")\n G_dict = load_pickle(\"text_graph.pkl\")\n G = G_dict[\"graph\"]\n \n if train_test_split == 0:\n infer_idx_start = G_dict[\"infer_idx_start\"]\n del G_dict\n \n logger.info(\"Building adjacency and degree matrices...\")\n A = nx.to_numpy_matrix(G, weight=\"weight\"); A = A + np.eye(G.number_of_nodes())\n degrees = []\n for d in G.degree(weight=None):\n if d == 0:\n degrees.append(0)\n else:\n degrees.append(d[1]**(-0.5))\n degrees = np.diag(degrees)\n X = np.eye(G.number_of_nodes()) # Features are just identity matrix\n A_hat = degrees@A@degrees\n f = X # (n X n) X (n X n) x (n X n) X (n X n) input of net\n \n if train_test_split == 1:\n logger.info(\"Splitting labels for training and inferring...\")\n ### stratified test samples\n test_idxs = []\n for b_id in df_data[\"label\"].unique():\n dum = df_data[df_data[\"label\"] == b_id]\n if len(dum) >= 4:\n test_idxs.extend(list(np.random.choice(dum.index, size=round(args.test_ratio*len(dum)), replace=False)))\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n # select only certain labelled nodes for semi-supervised GCN\n selected = []\n for i in range(len(df_data)):\n if i not in test_idxs:\n selected.append(i)\n save_as_pickle(\"selected.pkl\", selected)\n else:\n logger.info(\"Preparing training labels...\")\n test_idxs = [i for i in range(infer_idx_start, len(df_data))]\n selected = [i for i in range(infer_idx_start)]\n save_as_pickle(\"selected.pkl\", selected)\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n \n f_selected = f[selected]; f_selected = torch.from_numpy(f_selected).float()\n f_not_selected = f[test_idxs]; f_not_selected = torch.from_numpy(f_not_selected).float()\n labels_selected = list(df_data.loc[selected]['label'])\n if train_test_split == 1: \n labels_not_selected = list(df_data.loc[test_idxs]['label'])\n else:\n labels_not_selected = []\n \n f = torch.from_numpy(f).float()\n save_as_pickle(\"labels_selected.pkl\", labels_selected)\n save_as_pickle(\"labels_not_selected.pkl\", labels_not_selected)\n logger.info(\"Split into %d train and %d test lebels.\" % (len(labels_selected), len(labels_not_selected)))\n return f, X, A_hat, selected, labels_selected, labels_not_selected, test_idxs", "def build_multiple_datasets(\n template_dataset='ALLEN_st_cells_1_movies',\n template_experiment='ALLEN_st_selected_cells_1',\n model_structs='ALLEN_st_selected_cells_1',\n this_dataset_name='MULTIALLEN_',\n cluster=False,\n print_info=False,\n N=16):\n main_config = Allen_Brain_Observatory_Config()\n\n # Remove any BP-CC repos in the path\n bp_cc_paths = [\n x for x in sys.path if 'contextual' in x or 'Contextual' in x]\n [sys.path.remove(x) for x in bp_cc_paths]\n\n # Append the BP-CC repo to this python path\n if cluster:\n cc_path = main_config.cluster_cc_path\n else:\n cc_path = main_config.cc_path\n main_config.cc_data_dir = os.path.join(\n cc_path,\n 'dataset_processing') # Pass to encode_datasets.py\n sys.path.append(cc_path)\n import experiments # from BP-CC\n # from db import credentials\n exps = experiments.experiments()\n # db_config = credentials.postgresql_connection()\n\n # Query all neurons for an experiment setup\n queries = [ # MICHELE: ADD LOOP HERE\n [{ # DO THIS SEPARATELY\n 'rf_coordinate_range': { # Get all cells\n 'x_min': -10000,\n 'x_max': 10000,\n 'y_min': -10000,\n 'y_max': 10000,\n },\n 'structure': 'VISam',\n 'this_dataset_name': 'MULTIALLEN_VISam'\n 
}]\n ]\n filter_by_stim = [\n 'natural_movie_one',\n 'natural_movie_two'\n ]\n sessions = [\n 'three_session_C',\n 'three_session_C2'\n ]\n print 'Pulling cells by their RFs and stimulus: %s.' % filter_by_stim\n all_data_dicts = query_neurons_rfs(\n queries=queries,\n filter_by_stim=filter_by_stim,\n sessions=sessions)\n\n # Check if a weight sharing is needed\n assert len(flatten_list(all_data_dicts)) > 0, 'No cells in this query.'\n\n # Print cell information if requested\n if print_info:\n cre_lines = [x['cre_line'] for x in all_data_dicts[0]]\n cre_lines, cre_counts = np.unique(cre_lines, return_counts=True)\n cre_list = [(x, y) for x, y in zip(cre_lines, cre_counts)]\n print 'Found the following cre line promotors: %s' % json.dumps(\n cre_list)\n return\n\n # Prepare datasets\n dataset_method = declare_allen_datasets()[template_dataset]()\n if dataset_method['weight_sharing']:\n gridded_rfs, rf_size = create_grid_queries(all_data_dicts[0])\n if dataset_method['grid_query']:\n all_data_dicts = query_neurons_rfs(\n queries=gridded_rfs,\n filter_by_stim=filter_by_stim,\n sessions=sessions)\n all_data_dicts = [\n x for x in all_data_dicts if x != []] # Filter empties.\n downsample = dataset_method[\n 'process_stimuli']['natural_scenes']['resize'][0] /\\\n dataset_method['cc_repo_vars']['model_im_size'][0]\n filter_size = calculate_rf_size(\n rf_size=rf_size,\n downsample=downsample)\n else:\n filter_size = None\n\n # Declare the experiment template\n if dataset_method['st_conv']:\n # Dynamic dataset\n exp_method_template = os.path.join(\n main_config.exp_method_template_dir,\n '3d_exp_method_template.txt')\n else:\n # Static dataset\n exp_method_template = os.path.join(\n main_config.exp_method_template_dir,\n '2d_exp_method_template.txt')\n\n # Prepare directories\n model_directory = os.path.join(\n cc_path,\n 'models',\n 'structs')\n model_templates = glob(\n os.path.join(\n model_directory,\n model_structs,\n '*.py'))\n experiment_file = os.path.join(cc_path, 'experiments.py')\n\n # Loop through each query and build all possible datasets with template\n ts = get_dt_stamp()\n session_name = int(''.join(\n [random.choice(string.digits) for k in range(N//2)]))\n for ni, q in enumerate(all_data_dicts):\n assert len(q), 'Cell dictionary is empty.'\n meta_dir = os.path.join(\n main_config.multi_exps,\n '%s_cells_%s' % (len(q), ts))\n make_dir(meta_dir)\n if dataset_method['weight_sharing']:\n print 'Preparing dataset %s/%s.' % (\n ni,\n len(all_data_dicts))\n rf_grid = rf_extents(q)\n rf_dict = q[0]\n rf_dict['on_center_x_max'] = rf_grid['x_max']\n rf_dict['on_center_y_max'] = rf_grid['y_max']\n rf_dict['on_center_x'] = rf_grid['x_min']\n rf_dict['on_center_y'] = rf_grid['y_min']\n if 'this_dataset_name' in queries[ni][0].keys():\n this_dataset_name = queries[ni][0]['this_dataset_name']\n process_dataset(\n dataset_method=dataset_method,\n rf_dict=rf_dict,\n this_dataset_name=this_dataset_name,\n model_directory=model_directory,\n model_templates=model_templates,\n exps=exps,\n template_experiment=template_experiment,\n session_name=session_name,\n meta_dir=meta_dir,\n db_config=credentials, # db_config,\n experiment_file=experiment_file,\n main_config=main_config,\n idx=ni,\n cluster=cluster,\n filter_size=filter_size,\n exp_method_template=exp_method_template)\n else:\n for idx, rf_dict in enumerate(q):\n print 'Preparing dataset %s/%s in package %s/%s.' 
% (\n idx,\n len(q),\n ni,\n len(all_data_dicts))\n process_dataset(\n dataset_method=dataset_method,\n rf_dict=rf_dict,\n this_dataset_name=this_dataset_name,\n model_directory=model_directory,\n model_templates=model_templates,\n exps=exps,\n template_experiment=template_experiment,\n session_name=session_name,\n meta_dir=meta_dir,\n db_config=credentials, # db_config,\n experiment_file=experiment_file,\n main_config=main_config,\n idx=idx,\n cluster=cluster,\n exp_method_template=exp_method_template)", "def create_simple_data_set(\n n_training_points,\n n_testing_points,\n low=0,\n high=3,\n mode=training_testing_split.SEPERATE,\n kernel=kernel_matern,\n shuffle=True,\n):\n gp = gaussian_process(kernel=kernel, verbose=True)\n\n mid = (low + high) / 2\n\n if mode == training_testing_split.SEPERATE_LONG:\n x_training, x_testing = __seperate_long(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.SEPERATE:\n x_training, x_testing = __seperate(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.INTERSPREAD:\n x_training, x_testing = __interspread(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.RANDOM:\n x_training, x_testing = __random(n_training_points, n_testing_points, low, high)\n elif mode == training_testing_split.MIXED:\n\n def r(z):\n dist = np.random.randint(low=1, high=100, size=4)\n λ = lambda x: x / dist.sum()\n vfunc = np.vectorize(λ)\n dist = vfunc(dist)\n return (z * dist).round().astype(int)\n\n training_dist = r(n_training_points)\n testing_dist = r(n_testing_points)\n x1, x2 = __random(training_dist[0], testing_dist[0], low, high)\n x11, x22 = __interspread(training_dist[1], testing_dist[1], low, high)\n x111, x222 = __interspread(training_dist[2], testing_dist[2], low, high)\n x1111, x2222 = __seperate(training_dist[3], testing_dist[3], low, high)\n x_training = np.vstack([x1, x11, x111, x1111])\n x_testing = np.vstack([x2, x22, x222, x222])\n\n y_samples = gp.sample(np.vstack([x_training, x_testing]), 1).squeeze()\n y_training = y_samples[: len(x_training)].reshape(-1, 1)\n y_testing = y_samples[len(x_training) :].reshape(-1, 1)\n training_data_set = data_loader.DataSet(X=x_training, Y=y_training)\n testing_data_set = data_loader.DataSet(X=x_testing, Y=y_testing)\n\n if shuffle:\n training_data_set.shuffle()\n testing_data_set.shuffle()\n\n return training_data_set, testing_data_set", "def build_dataloaders(dataset, batch_size, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):\n # 데이터셋 길이\n dataset_len = len(dataset)\n\n # 학습, 평가 데이터 나누기\n eval_len = int(dataset_len * train_test_split)\n train_len = dataset_len - eval_len\n\n train_dataset, eval_dataset = random_split(dataset, (train_len, eval_len))\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle)\n eval_loader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=eval_shuffle)\n\n\n logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}\n eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')\n\n return train_loader, eval_loader", "def generate_mnist_datasets(\n datapoints_per_task,\n K_list,\n cir_inner_loop_list, \n test_task_idx, \n val_task_idx,\n n_finetune_sets):\n\n # arbitrarily chosen, class-imbalance rate in outer and inner training loops\n cir_outer_loop = 0.5\n cir_inner_loop = 0.5\n # class-imbalance rate in the test sets of the test and validation tasks\n cir_test 
= 0.5\n # arbitrarily chosen, percentage of data that will be used in the inner training loop\n percent_data_inner_loop = 0.5\n\n percent_data_finetune_val = 0.8\n\n n_test_set = 4000\n\n test_task_idx, val_task_idx = test_task_idx, val_task_idx\n\n finetune_sets_per_K_cir = {}\n test_task_test_set, val_task = {}, {}\n \n\n train_task_list_inner, train_task_list_outer = [], []\n\n train_tasks_idxs = [i for i in range(0,10) if i not in [val_task_idx, test_task_idx]]\n\n base_path = '/home/USER/Documents'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ubuntu/Projects'\n train_images, train_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/train-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/train-labels-idx1-ubyte')\n\n test_images, test_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/t10k-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/t10k-labels-idx1-ubyte')\n\n\n train_images, test_images = train_images.reshape((-1,28,28))/255.0, test_images.reshape((-1,28,28))/255.0\n images = np.concatenate((train_images, test_images))\n labels = np.concatenate((train_labels, test_labels))\n\n test_task_normal_indexes, val_task_normal_indexes = list(np.nonzero(labels == test_task_idx)[0]), list(np.nonzero(train_labels == val_task_idx)[0])\n test_task_X_normal, val_task_X_normal = images[test_task_normal_indexes],train_images[val_task_normal_indexes]\n test_task_Y_normal, val_task_Y_normal = np.zeros_like(labels[test_task_normal_indexes]), np.zeros_like(train_labels[val_task_normal_indexes])\n\n\n # val and test task have anomalies (samples of other numbers) that are not used for training\n # besides the two sets of anomalies (one for val task and one for test task are disjoint)\n test_task_anomalous_indexes = list(np.nonzero(test_labels[:5000] != test_task_idx)[0])\n val_task_anomalous_indexes= [index for index, element in enumerate(list(test_labels[5000:])) if element not in [val_task_idx, test_task_idx]]\n\n\n test_task_X_anomalous, val_task_X_anomalous = test_images[:5000][test_task_anomalous_indexes],test_images[5000:][val_task_anomalous_indexes]\n test_task_Y_anomalous, val_task_Y_anomalous = np.ones_like(test_labels[:5000][test_task_anomalous_indexes]), np.ones_like(test_labels[5000:][val_task_anomalous_indexes])\n\n test_task_X, val_task_X = np.concatenate((test_task_X_normal, test_task_X_anomalous)), np.concatenate((val_task_X_normal, val_task_X_anomalous))\n test_task_Y, val_task_Y = np.expand_dims(np.concatenate((test_task_Y_normal, test_task_Y_anomalous)),-1), np.expand_dims(np.concatenate((val_task_Y_normal, val_task_Y_anomalous)),-1)\n\n\n train_tasks_X_list, train_tasks_Y_list = [], []\n for task_idx in train_tasks_idxs:\n train_task_normal_indexes = list(np.nonzero(train_labels == task_idx)[0]) \n train_task_anomalous_indexes = [index for index, element in enumerate(list(train_labels)) if element not in [task_idx, val_task_idx, test_task_idx]]\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == val_task_idx)[0]) == 0)\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == test_task_idx)[0]) == 0)\n train_task_X_normal, train_task_X_anomalous = train_images[train_task_normal_indexes], train_images[train_task_anomalous_indexes]\n train_task_Y_normal, train_task_Y_anomalous = np.zeros_like(train_labels[train_task_normal_indexes]), np.ones_like(train_labels[train_task_anomalous_indexes])\n train_task_X, train_task_Y = 
np.concatenate((train_task_X_normal, train_task_X_anomalous)), np.concatenate((train_task_Y_normal, train_task_Y_anomalous))\n train_tasks_X_list.append(train_task_X)\n train_tasks_Y_list.append(np.expand_dims(train_task_Y,-1))\n\n\n\n # building test task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(test_task_Y == 0)[0]), list(np.nonzero(test_task_Y == 1)[0])\n n_test_set_normal = int(n_test_set*cir_test)\n test_set_normal_indexes = random.sample(normal_indexes, n_test_set_normal)\n test_set_anomaly_indexes = random.sample(anomaly_indexes, n_test_set - n_test_set_normal)\n test_set_indexes = []\n test_set_indexes += test_set_normal_indexes\n test_set_indexes += test_set_anomaly_indexes\n\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_X[test_set_indexes], test_task_Y[test_set_indexes]\n\n\n #shuffle\n s_test = np.arange(test_task_test_set['test_X'].shape[0])\n np.random.shuffle(s_test)\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_test_set['test_X'][s_test], test_task_test_set['test_Y'][s_test]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n\n\n for K in K_list:\n finetune_sets_per_cir = {}\n for cir in cir_inner_loop_list:\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n \n finetune_sets_list = []\n\n disjoint = False\n if(cir*K*n_finetune_sets<len(rest_normal_indexes)):\n disjoint = True\n\n n_finetune_normal = int(K*cir)\n n_finetune_anomaly = K - n_finetune_normal\n for i in range(n_finetune_sets):\n # if enough for disjoint do that\n # else sample randomly\n # store in a dict with keys cir_K\n finetune_normal_indexes = random.sample(rest_normal_indexes, n_finetune_normal)\n finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_finetune_anomaly)\n finetune_indexes = []\n finetune_indexes += finetune_normal_indexes\n finetune_indexes += finetune_anomaly_indexes\n finetune_set = {}\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = test_task_X[finetune_indexes], test_task_Y[finetune_indexes]\n\n #shuffle\n s_finetune = np.arange(finetune_set['finetune_X'].shape[0])\n np.random.shuffle(s_finetune)\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = finetune_set['finetune_X'][s_finetune], finetune_set['finetune_Y'][s_finetune]\n\n finetune_sets_list.append(finetune_set)\n \n if(disjoint):\n rest_normal_indexes = [index for index in rest_normal_indexes if index not in finetune_normal_indexes]\n rest_anomaly_indexes = [index for index in rest_anomaly_indexes if index not in finetune_anomaly_indexes]\n\n finetune_sets_per_cir[str(cir)] = finetune_sets_list\n\n finetune_sets_per_K_cir[str(K)] = finetune_sets_per_cir\n\n\n #building val task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(val_task_Y == 0)[0]), list(np.nonzero(val_task_Y == 1)[0])\n n_val_finetune = int(percent_data_finetune_val*datapoints_per_task)\n n_val_test_set = datapoints_per_task - n_val_finetune\n n_val_test_set_normal = int(n_val_test_set*cir_test)\n val_test_set_normal_indexes = random.sample(normal_indexes, n_val_test_set_normal)\n\n\n val_test_set_anomaly_indexes = random.sample(anomaly_indexes, n_val_test_set - n_val_test_set_normal)\n val_test_set_indexes = []\n val_test_set_indexes += 
val_test_set_normal_indexes\n val_test_set_indexes += val_test_set_anomaly_indexes\n val_task['test_X'], val_task['test_Y'] = val_task_X[val_test_set_indexes], val_task_Y[val_test_set_indexes]\n\n\n rest_normal_indexes = [index for index in normal_indexes if index not in val_test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in val_test_set_anomaly_indexes]\n\n n_val_finetune_normal = int(n_val_finetune*cir_inner_loop)\n val_finetune_normal_indexes = random.sample(rest_normal_indexes, n_val_finetune_normal)\n val_finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_val_finetune - n_val_finetune_normal)\n val_finetune_indexes = []\n val_finetune_indexes += val_finetune_normal_indexes\n val_finetune_indexes += val_finetune_anomaly_indexes\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task_X[val_finetune_indexes], val_task_Y[val_finetune_indexes]\n\n #shuffle\n s_val_finetune = np.arange(val_task['finetune_X'].shape[0])\n s_val_test = np.arange(val_task['test_X'].shape[0])\n np.random.shuffle(s_val_finetune)\n np.random.shuffle(s_val_test)\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task['finetune_X'][s_val_finetune], val_task['finetune_Y'][s_val_finetune]\n val_task['test_X'], val_task['test_Y'] = val_task['test_X'][s_val_test], val_task['test_Y'][s_val_test]\n\n\n\n # building sets of data of the training tasks\n for task_X, task_Y in zip(train_tasks_X_list, train_tasks_Y_list):\n normal_indexes, anomaly_indexes = list(np.nonzero(task_Y == 0)[0]), list(np.nonzero(task_Y == 1)[0])\n\n n_inner_loop = int(percent_data_inner_loop*datapoints_per_task)\n n_inner_loop_normal = int(n_inner_loop*cir_inner_loop)\n n_outer_loop = datapoints_per_task - n_inner_loop\n n_outer_loop_normal = int(n_outer_loop*cir_outer_loop)\n \n inner_loop_normal_indexes = random.sample(normal_indexes, n_inner_loop_normal)\n inner_loop_anomaly_indexes = random.sample(anomaly_indexes, n_inner_loop - n_inner_loop_normal)\n inner_loop_indexes = []\n inner_loop_indexes += inner_loop_normal_indexes\n inner_loop_indexes += inner_loop_anomaly_indexes\n\n train_task_inner_X, train_task_inner_Y = task_X[inner_loop_indexes], task_Y[inner_loop_indexes]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in inner_loop_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in inner_loop_anomaly_indexes]\n\n \n outer_loop_normal_indexes = random.sample(rest_normal_indexes, n_outer_loop_normal)\n outer_loop_anomaly_indexes = random.sample(rest_anomaly_indexes, n_outer_loop - n_outer_loop_normal)\n outer_loop_indexes = []\n outer_loop_indexes += outer_loop_normal_indexes\n outer_loop_indexes += outer_loop_anomaly_indexes\n\n train_task_outer_X, train_task_outer_Y = task_X[outer_loop_indexes], task_Y[outer_loop_indexes]\n\n\n s_inner = np.arange(train_task_inner_X.shape[0])\n s_outer = np.arange(train_task_outer_X.shape[0])\n np.random.shuffle(s_inner)\n np.random.shuffle(s_outer)\n train_task_list_inner.append([train_task_inner_X[s_inner],train_task_inner_Y[s_inner]])\n train_task_list_outer.append([train_task_outer_X[s_outer],train_task_outer_Y[s_outer]])\n\n\n\n train_tasks_inner_X = np.stack([train_task_list_inner[i][0]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_inner_Y = np.stack([train_task_list_inner[i][1]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_outer_X = np.stack([train_task_list_outer[i][0]\n for i in range(len(train_task_list_outer))], 0)\n 
train_tasks_outer_Y = np.stack([train_task_list_outer[i][1]\n for i in range(len(train_task_list_outer))], 0)\n\n \n train_tasks = {'X_train_inner': train_tasks_inner_X,\n 'Y_train_inner': train_tasks_inner_Y,\n 'X_train_outer': train_tasks_outer_X,\n 'Y_train_outer': train_tasks_outer_Y\n }\n\n\n return train_tasks, val_task, test_task_test_set, finetune_sets_per_K_cir", "def shuffle_dataset(self):\n # TODO explain approached used for selecting training and test data\n labels = self.dataset.label.unique()\n good_jobs = self.dataset[self.dataset.label == \"Good\"]\n bad_jobs = self.dataset[self.dataset.label == \"Bad\"]\n\n # TODO n>2 probablly won't work the way it's supposed to currently\n if len(labels) == 2:\n # oversample\n resize = max(len(good_jobs.label),len(bad_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label))\n good_jobs_re = good_jobs.sample(resize)\n bad_jobs_re = bad_jobs.sample(resize)\n dataset = pd.concat([good_jobs_re, bad_jobs_re])\n elif len(labels) == 3:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n # oversample\n resize = max(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re])\n elif len(labels) == 4:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n ideal_jobs = self.dataset[self.dataset.label == \"Ideal\"]\n\n # middle of the road approach\n resize = int(mean([len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label),len(ideal_jobs.label)]))\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n ideal_jobs_re = ideal_jobs.sample(resize,replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re,ideal_jobs_re])\n\n train,test = train_test_split(dataset,test_size=0.25,stratify = dataset.label,shuffle=True)\n #test = self.dataset[~self.dataset.isin(train)].dropna()\n #test = self.dataset[(~dataset.label.isin(self.dataset.label))&(~dataset.description.isin(self.dataset.description))]\n #0tr_hashes = [hash(tuple(d)) for d in train.description]\n #ytest = [val for iter,val in self.dataset.iterrows() if hash(tuple(val.description)) not in tr_hashes]\n\n self.y_train,self.y_test = train.label.values,test.label.values\n self.X_train,self.X_test = train.description.values,test.description.values", "def build_train_and_eval_datasets(self,\n dataset_name,\n eval_dataset_name,\n paracrawl_size=PARACRAWL_DEFAULT_SIZE,\n newscommentary_size=None,\n newscomment_sample_ratio=1.0):\n self.paracrawl_size = paracrawl_size\n if newscommentary_size:\n self.newscommentary_size = newscommentary_size\n self.newscomment_sample_ratio = newscomment_sample_ratio\n if dataset_name in self.custom_dataset.keys():\n logging.info('Building custom datatset: %s', dataset_name)\n return self.custom_dataset[dataset_name]()\n else:\n logging.info('Building DEFAULT datatset: %s', dataset_name)\n return self.default_builder(dataset_name, eval_dataset_name)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def 
get_dataset(dataset_dir, split_name, batch_size, workers):\n folder = os.path.join(dataset_dir, '{}_*.tfrecord'.format(split_name))\n filenames = tf.data.Dataset.list_files(folder)\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.shuffle(1000)\n dataset = dataset.repeat()\n dataset = dataset.map(preprocess, num_parallel_calls=workers)\n dataset = dataset.apply(\n tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(2)\n\n filename = '{}.txt'.format(split_name)\n with open(os.path.join(dataset_dir, filename), 'r') as f:\n examples = int(f.read().strip())\n\n return dataset.make_one_shot_iterator(), examples", "def make_fixture(binary=False, balanced=False, split=False):\n kwargs = {\n \"n_samples\": 100,\n \"n_features\": 20,\n \"n_informative\": 8,\n \"n_redundant\": 2,\n \"n_clusters_per_class\": 1,\n \"random_state\": 89092,\n }\n\n if binary:\n kwargs[\"n_classes\"] = 2\n kwargs[\"weights\"] = None if balanced else [0.3, 0.7]\n else:\n kwargs[\"n_classes\"] = 5\n kwargs[\"weights\"] = None if balanced else [0.1, 0.2, 0.4, 0.2, 0.01]\n\n X, y = make_classification(**kwargs)\n\n if split:\n X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=101)\n return Dataset(Split(X_train, X_test), Split(y_train, y_test))\n\n return Dataset(X, y)", "def genTrainingSet(set_of_CSVs, file_to_classify, train_size = 5):\n set_of_csvs_minus_target = copy.copy(set_of_CSVs)\n # remove the file we want to classify\n set_of_csvs_minus_target.remove(file_to_classify)\n\n # extract out the random noise files\n # first, set the seed\n random.seed(time.time())\n # now sample\n return_list = random.sample(set_of_csvs_minus_target, train_size)\n return return_list", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def balance_dataset_sampling(instances):\n probabilities = get_balancing_probabilities(instances)\n new_instances = [ (features, classification) \n for features, classification in instances \n if random.random() < probabilities[classification] ]\n\n return new_instances", "def get_datasets() -> List[Dataset]:\n\n amzn = Dataset(\n id='amzn', name='Amazon Reviews', language='en',\n description=\"This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.\")\n\n cnn = Dataset(\n id='cnn_dailymail', name='CNN/ DailyMail', language='en',\n description='The well-known CNN/ DailyMail data set for text summarization (version 3.0.0). The data has been fetched via HuggingFace Datasets')\n\n swisstext = Dataset(\n id='swisstext', name='SwissText 2019', language='de',\n description='The dataset was published for the SwissText conference 2019. 
')\n\n return [amzn, cnn, swisstext]", "def Zip(datasets):\n return tf.data.Dataset.zip(datasets)", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def shuffle_datasets(self):\n assert self.data_tags is not None\n assert self.training_dataset is not None\n assert self.validation_dataset is not None\n self.training_dataset = self.shuffle_data_dictionary(self.training_dataset)\n self.validation_dataset = self.shuffle_data_dictionary(self.validation_dataset)", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': 
self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il" ]
[ "0.6996355", "0.66383696", "0.6572915", "0.6517153", "0.64793134", "0.64199245", "0.64149666", "0.63592505", "0.63368434", "0.63012886", "0.6289509", "0.62852", "0.6267794", "0.62432927", "0.622932", "0.62188154", "0.62153995", "0.62117213", "0.6202799", "0.6186463", "0.6181616", "0.61775786", "0.616541", "0.6165191", "0.6150936", "0.6144637", "0.61238927", "0.61025333", "0.603672", "0.60283566", "0.602364", "0.6022501", "0.6004383", "0.5998322", "0.5987329", "0.59790707", "0.5970738", "0.5961037", "0.5953687", "0.5945128", "0.5942107", "0.5940959", "0.5939736", "0.5939431", "0.59283566", "0.5903652", "0.5895879", "0.5892734", "0.58888245", "0.58809286", "0.58714145", "0.58638835", "0.586067", "0.5851001", "0.5850389", "0.5845364", "0.58428246", "0.5839827", "0.5837802", "0.58347875", "0.58327353", "0.58313596", "0.5830717", "0.58269614", "0.5816473", "0.58119494", "0.58093786", "0.58005834", "0.5800125", "0.5795684", "0.57943314", "0.5792592", "0.57872796", "0.57820916", "0.57816476", "0.5781613", "0.5780905", "0.57786804", "0.5776356", "0.57758844", "0.57736754", "0.57707155", "0.5765874", "0.57605916", "0.57592845", "0.5758428", "0.57525045", "0.57482666", "0.57465696", "0.5746493", "0.5744902", "0.5741424", "0.57375306", "0.57308316", "0.5729516", "0.5727606", "0.57272744", "0.57249546", "0.57206756", "0.57203496" ]
0.68757796
1
Add error messages with Code for easy debugging
def add_codes(cls): class ErrorsWithCodes: # pylint: disable=too-few-public-methods """Add error messages with Code for easy debugging """ def __getattribute__(self, code): msg = getattr(cls, code) return f'[{code}] {msg}' return ErrorsWithCodes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(message):\n print str(message)", "def error(self, message=None, show_help=True):", "def error(self, message):\n print message", "def error_mess():\n print(\"Sorry, I didn't understand that.\")", "def error(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['error']:\n self.print_lines(self.colored(('red', 'bold'), lines))", "def error(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def err(msg):\n print(colored.red(\"[ERROR]: {0}\".format(msg)))", "def show_error(title, message, print_message=False):\n\n pass", "def debug(msg):", "def error(cls, message):\n print('[ERROR] {0}'.format(message))", "def add_error(*msg):\n\n global errors\n errors.append(''.join(msg))", "def error_message(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def debug_error(self, message):\n raise NotImplementedError", "def error(message):\n if DEBUG:\n with print_lock:\n print((Colours.FAIL + 'ERROR: ' + Colours.END_COLOUR + message).strip())", "def apology(message, code=400):\n return render_template(\"error.html\", top=code, bottom=message.upper()), code", "def errprint(msg):\n\n print('!! *** ERROR: %s' % msg)", "def error(msg):\n sys.stdout.write('%s[ ERROR ]%s %s\\n' % (colors.RED, colors.RESET, msg))", "def __str__(self) -> str:\r\n return f'{self.error_code} ---> {self.message}'", "def __str__(self):\n return \"ERROR: \" + self.error_message", "def print_error_message(message):\r\n return print('ERROR:',message)", "def error(context, *infos):\n messages = [\"An error occurred when when \" + context + \":\"]\n messages.extend(infos)\n print(\"\\n\\t\".join(map(str, messages)))\n return 1", "def _insertErrorMsg(self, ErrorMessage, outputFileObject):\n outputFileObject.write('<font color=\"' + AutoGrader.Const.ERROR_COLOR + '\">')\n outputFileObject.write (ErrorMessage)\n outputFileObject.write('</font>')", "def log_error(title, message):\n if title == \"Redundant\":\n print(f\"[{title}]: Refactoring is not necessary\")\n else:\n print(f\"[{title}]: Refactoring is not allowed\")\n print(f\"{message}\")", "def error(self, text):\n\n debug_text = self._get_debug_text(text)\n if self._live_debug_level < logging.ERROR and self._live_debug_enabled:\n if self.py_cui_root is not None:\n self.py_cui_root.status_bar.set_text(debug_text)\n super().debug(debug_text)\n else:\n super().error(debug_text)", "def add_error(self, msg):\n self._add_message(msg, self._errors)", "def error(err):\n\n return str(err) + '\\n'", "def error(message: str) -> None:\n print(f\"ERROR: {message}\")", "def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)", "def error(self, value='', line_before=False):\n self.errors += 1\n if line_before:\n print('\\n')\n print(Fore.RED + '!!! 
' + value)", "def error(msg):\n click.secho(f'[ERROR] {msg}', fg='red')", "def errMsg(self, code, text):\n # Preprocess text\n lines = text.splitlines()\n\n image = self.errMsgImage.copy()\n draw = ImageDraw.Draw(image)\n # Text\n x0 = self.width/4 + 2\n y0 = -1\n draw.text((x0, y0), 'ERROR {:5d}'.format(code), font=self.font, fill=255)\n for i in range(0,len(lines)):\n draw.text((x0, y0 + (i+1)*7), lines[i], font=self.font, fill=255)\n self.disp.image(image.rotate(180))\n self.disp.display()\n return", "def error(self, msg, *args, **kwargs):\n pass", "def present_error_massage(self, invalid_equation_code):\n print(\"Invalid equation\")\n print(self.ERROR_MASSAGE_DIC[invalid_equation_code])", "def error(message, code=400):\n return render_template(\"error.html\", top=code, bottom=message)", "def error_debug(input):\n print(\"\\033[1;31;40m{}\\033[0m\".format(input))", "def error(self, message):\n sys.stderr.write(message[0].capitalize() + message[1:] + '\\n')\n sys.stderr.write('Use \"arhc.py --help\" to view more information.\\n')\n exit()", "def error(indent, message):\n print \"%sError: %s\" % (indent, message)", "def error(self):\n ...", "def buildErrorMessage(self, test, err):\n\n errorMessage = \"\"\n errorMessage += test.id()\n errorMessage += \"\\n\\n\"\n\n errorMessage += traceback.format_exc() + \"\\n\"\n return errorMessage", "def collect_errors_and_warnings(self) -> str:\n # Complete error message\n message = \"----------------ERRORS----------------\\n\"\n if self.errors == \"\":\n message = \"YOUR FILE IS VALIDATED!\\n\"\n logger.info(message)\n else:\n for error in self.errors.split(\"\\n\"):\n if error != \"\":\n logger.error(error)\n message += self.errors\n if self.warnings != \"\":\n for warning in self.warnings.split(\"\\n\"):\n if warning != \"\":\n logger.warning(warning)\n message += \"-------------WARNINGS-------------\\n\" + self.warnings\n return message", "def vpython_error_message():\n error_message = (\n \"<p>&#9888; Sorry, spacesimmer! OrbitX has crashed for \"\n \"some reason.</p>\"\n\n \"<p>Any information that OrbitX has on the crash has \"\n \"been saved to a logfile. 
If you want to get this problem fixed,\"\n \" send the contents of the log file \"\n \"<blockquote>\" +\n logs.logfile_name.replace('\\\\', '\\\\\\\\') +\n \"</blockquote> \"\n \"to Patrick Melanson along with a description of what was \"\n \"happening in the program when it crashed.</p>\"\n\n \"<p>Again, thank you for using OrbitX!</p>\"\n )\n vpython.canvas.get_selected().append_to_caption(f\"\"\"<script>\n if (document.querySelector('div.error') == null) {{\n error_div = document.createElement('div');\n error_div.className = 'error';\n error_div.innerHTML = \"{error_message}\";\n document.querySelector('body').prepend(error_div);\n }}\n </script>\"\"\")\n vpython.canvas.get_selected().append_to_caption(\"\"\"<style>\n .error {\n color: #D8000C !important;\n background-color: #FFBABA;\n margin: 10px 0;\n padding: 10px;\n border-radius: 5px 5px 5px 5px;\n width: 700px;\n }\n span.code {\n color: #D8000C !important;\n font-family: monospace;\n }\n blockquote {\n font-family: monospace;\n }\n </style>\"\"\")\n\n time.sleep(0.1) # Let vpython send out this update", "def _error(msg):\n\n error(None, msg)", "def error(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def error(self, e):\n return \"{}: {} ({})\".format(e.__class__.__name__, e.__doc__, e.message)", "def log_error(self, fmt, *args):\r\n pass\r\n # log_error\r", "def err_message(self, message):\n self.errors.append(1)\n message = \"<b>\" + message + \"</b>\"\n self.timer_id = GLib.timeout_add_seconds(5, self.error_false)\n # Show if is was hidden\n if self.hidden:\n self.toggle()\n self.was_hidden = True\n self.left_label.set_markup(message)", "def addError(self, test, err):\r\n self.errors.append((test, self._exc_info_to_string(err, test)))\r\n self._mirrorOutput = True", "def error(self, code, msg):\r\n self.status = code\r\n self.status_message = str(msg)", "def append_error(self, msg):\n if msg.startswith(IGNORE_PREFIX):\n misc.cdblogv(misc.kLogErr, 0, \"bomcreator: error message cannot be ignored (%s)\" % msg)\n msg = msg[len(IGNORE_PREFIX):]\n self._messages.append((msg, 'alert-error'))\n misc.cdblogv(misc.kLogErr, 0, \"bomcreator error hint: \" + msg)\n self._hasError = True", "def debug_error_message(msg):\r\n\r\n action = config.compute_test_value\r\n\r\n #this message should never be called when the debugger is off\r\n assert action != 'off'\r\n\r\n if action in ['raise', 'ignore']:\r\n raise ValueError(msg)\r\n else:\r\n assert action == 'warn'\r\n warnings.warn(msg, stacklevel=2)", "def formatError(self,error):\n return '<font color=\"#f00\"><b><i>%s</i></b></font><br />\\n' % error", "def add_error(self, u_file: UserFile, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:", "def error(message):\n print(message, file=sys.stderr)", "def display_errors(self):\r\n\r\n def format_name(field_name):\r\n \"\"\"Formats field names for error display\"\"\"\r\n if field_name == \"celebration_tier\":\r\n return \"{wLargesse{n\"\r\n return \"{w%s{n\" % field_name.capitalize()\r\n\r\n msg = \"Please correct the following errors:\\n\"\r\n msg += \"\\n\".join(\r\n \"%s: {r%s{n\" % (format_name(field), \", \".join(errs))\r\n for field, errs in self.errors.items()\r\n )\r\n return msg", "def error(self, error_msg):\n print(\"ERROR DETECTED\")\n print(error_msg)", "def sendErrorMessage(msg): #@NoSelf", "def send_error(msg):\n\n print(msg)", "def error(self, tag, message, exc_info=False):\n \n 
self.log(logging.error,tag, message, exc_info)", "def error(self, message, **args):\n\t\terror_message = Utils.boldCode() + \"Error: \" + Utils.normalCode() + message\n\t\t\n\t\tif args.has_key(\"target\"):\n\t\t\tself.sendMessage(args[\"target\"], error_message)\n\t\t\t\n\t\tif args.has_key(\"console\"):\n\t\t\tif args[\"console\"]:\n\t\t\t\tprint self.errorTime(), \"<ERROR>\", Utils.stripCodes(message)\n\t\telse:\n\t\t\tprint self.errorTime(), \"<ERROR>\", Utils.stripCodes(message)", "def _display_error(message: str) -> None:\n print()\n print(message, end='\\n\\n')", "def error(msg):\n\n raise Exception(msg)", "def addExceptionMessage(self, q, inst, traceback):\n self.fail('FAIL: Exception raised: %s' % inst)\n self.addMessage('')\n for line in traceback.format_exc().split('\\n'):\n self.addMessage(line)", "def show_errors(self):\n\n if self.errors:\n print('Clean error in:')\n for file in self.errors:\n print(' %s' % file)", "def error(self, msg, details = \"\" ):\n\n if details is not None:\n msg += \"\\n\\n\" + details\n\n if not self.is_subprocess:\n self.parser.error(msg)\n else:\n raise Exception(msg)", "def show_error(self):\n logging.error('=> ', self.test_script_source.line)\n total_len = 0\n i = 0\n if self.test_script_source.current_pos <= self.test_script_source.total_num_seg:\n if i < self.test_script_source.current_pos - 1:\n total_len = total_len + len(self.test_script_source.line_segments[i])\n i += 1\n else:\n total_len = len(self.test_script_source.line)\n\n if self.test_script_source.current_pos > 1:\n logging.error('=> ', (' ' * (total_len + 1)) + '^')\n else:\n logging.error('=> ', (' ' * total_len) + '^')", "def __str__(self):\n return \"Error: %s\"%self.__message", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def showerrors():\n errorMessages = middleware.ixn.showErrorMessage(silentMode=True)\n if errorMessages:\n print(errorMessages)\n print()", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def error(self, *args, **kwargs):\n if len(args) == 3:\n print(f\"ERROR: {args[1]}\")\n else:\n print(f\"ERROR: {args[0]}\")", "def initialize_error_summary() -> str:\n error_summary = '\\nSummary of <span class=\"tex-fatal\">Critical Errors:</span>\\n\\n<ul>\\n'\n return error_summary", "def addError(self, test, err):\n\n super(ForceBalanceTestResult, self).addError(test,err)\n self.logger.warning(\"\\r\\x1b[33;1mERROR\\x1b[0m \" + test.shortDescription() + \"\\n\")\n\n errorMessage = self.buildErrorMessage(test,err)\n\n for line in errorMessage.splitlines():\n self.logger.warning(\"\\t >\\t\" + line + \"\\n\")", "def error_general_details(traceback_str: str) -> str:\n return f\"Here is some more info on the error I encountered:\\n```{traceback_str}```\"", "def error(msg):\n log('ERROR', msg)", "def log_error(err):\n print(err)", "def ErrorString(self): # real signature unknown; restored from __doc__\n pass", "def 
creation_error(src_dict: Dict[str, List[str]], e: str):\n return \"LED Group error in %s: %s\\n)\" % (json.dumps(src_dict), e)", "def msg(cls, error=None, debug=True, trace=True):\n if debug and error is not None:\n print(error)\n if debug and trace:\n print(traceback.format_exc())", "def error(self, *args, **kwargs):", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def debug(self, message):\r\n pass", "def debug_error(self, message):\n self.emit(QtCore.SIGNAL(\"debug_error(QString)\"), message)", "def error(self, text, info=None):\n self.details[\"message\"] = text\n if info:\n self.details[\"details\"] = info", "def error(self, error):\n pass", "def report_error(self, code, lines=()):\n errordesc = self.get_error_description(code)\n\n print \"%d %s\" % (code, errordesc)\n\n # If lines are empty, no further processing needed\n if not lines: return\n\n # Always process list of lines. If the lines variable is a tuple, \n # i.e. we convert it to a single item list\n if type(lines).__name__ == 'tuple': lines = [lines,]\n\n for line in lines:\n # output detailed lines if verbose is on\n if self.verbose: print \" line %d: %s\" % (line[0], \",\".join(line[1:]))\n # record the errors in error tracking \n if code in self.errors:\n self.errors[code] += [(line[0], self.content[line[0]-1])]\n else:\n self.errors[code] = [(line[0], self.content[line[0]-1])]", "def error(self, *args, **kwargs):\n self.msg(logging.ERROR, *args, **kwargs)", "def errmsg(self, str, prefix=\"** \"):\n raise NotImplementedError(NotImplementedMessage)", "def print_error(string: str, begin: str = '') -> str:\n return begin + Fore.RED + \"[ERROR] \" + string + Fore.RESET", "async def gen_error(error_id: str, ctx: commands.Context) -> Embed:\n errors = get_file(\"errors\")\n error = Embed(color=error_color)\n error.add_field(name=\"⚠️ \" + errors[error_id][\"title\"], value=errors[error_id]['txt'])\n error = set_footer(error, ctx)\n await ctx.send(embed=error)", "def _display_syntax_error(self, errorid):\n\n # For total error count\n self.syntax_errors_list.append(errorid)\n\n if errorid == \"start\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected START.\")\n elif errorid == self.scanner.END_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected END.\")\n elif errorid == self.scanner.DEVICES_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected DEVICES.\")\n elif errorid == self.scanner.CONNECTIONS_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected CONNECTIONS.\")\n elif errorid == self.scanner.MONITORS_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected MONITORS.\")\n\n elif errorid == \"devicename\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid device name.\")\n elif errorid == \"devicetype\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid device type.\")\n elif errorid == \"parameter\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid parameter type.\")\n elif errorid == \"semicoloncomma\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected 
a semicolon or a comma.\")\n elif errorid == \"number\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid input number.\")\n elif errorid == \"doutput\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Only DTypes can specify an output. \\\n Either an invalid DType output or should not have an output.\")\n elif errorid == \"arrowperiod\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected either an arrow or a DType output\")\n\n elif errorid == \"semicolon\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected a semicolon.\")\n elif errorid == \"equal\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected an equal sign.\")\n elif errorid == \"comma\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected a comma.\")\n elif errorid == \"period\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected a period.\")\n elif errorid == \"arrow\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected an arrow ->.\")\n elif errorid == \"input\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Inputs must either start with I or be \\\n DATA, CLK, SET, CLEAR.\")\n\n return None", "def log_error(e):\n\tprint(e)", "def log_error(e):\n\tprint(e)" ]
[ "0.72324693", "0.71228033", "0.71033734", "0.6947497", "0.69397295", "0.6862639", "0.68276596", "0.6808824", "0.6751318", "0.67416126", "0.67329", "0.67084694", "0.6701527", "0.6690459", "0.66822183", "0.66812146", "0.66701764", "0.66615677", "0.6634007", "0.6626952", "0.66089886", "0.6604817", "0.66015786", "0.65814704", "0.6571538", "0.6565257", "0.65442526", "0.654274", "0.6541892", "0.65001947", "0.6498834", "0.64867353", "0.6483454", "0.64497983", "0.64383364", "0.64179623", "0.6415447", "0.6412143", "0.6410666", "0.64099234", "0.6405197", "0.63996583", "0.6395643", "0.6388413", "0.63868636", "0.6381314", "0.6366815", "0.6365971", "0.6364293", "0.6340537", "0.6323272", "0.6321493", "0.63211286", "0.63174444", "0.62999016", "0.6297126", "0.6291874", "0.6285708", "0.62795585", "0.6278626", "0.6278083", "0.62716115", "0.6269509", "0.6267329", "0.6265789", "0.625577", "0.62551546", "0.6254571", "0.6254571", "0.6254571", "0.6254571", "0.6254571", "0.6254571", "0.6254571", "0.6254571", "0.62482077", "0.624783", "0.62443846", "0.6244206", "0.6241399", "0.62271994", "0.62174934", "0.6213507", "0.6210837", "0.6204333", "0.620209", "0.61985195", "0.61897236", "0.61850846", "0.61819685", "0.6162397", "0.6158878", "0.6154911", "0.614377", "0.61436313", "0.61430615", "0.6142086", "0.61386573", "0.613524", "0.613524" ]
0.6224254
81
Takes an URL, a filename, and the expected bytes, download the contents and returns the filename num_bytes=None disables the file size check.
def maybe_download(url, filename, prefix, num_bytes=None): local_filename = None if not os.path.exists(os.path.join(prefix, filename)): try: logger.info("Downloading file {}...".format(url + filename)) with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t: local_filename, _ = urlretrieve(url + filename, os.path.join(prefix, filename), reporthook=_reporthook(t)) except AttributeError as e: logger.error("An error occurred when downloading the file! Please get the dataset using a browser.") raise e # We have a downloaded file # Check the stats and make sure they are ok file_stats = os.stat(os.path.join(prefix, filename)) if num_bytes is None or file_stats.st_size == num_bytes: logger.info("File {} successfully loaded".format(filename)) else: raise Exception("Unexpected dataset size. Please get the dataset using a browser.") return local_filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. 
Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print ('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n filepath = datapath + filename\n if not os.path.exists(filepath):\n # urlretrieve returns a tuple of saved filepath and info() of the downloaded file\n filepath, _ = urllib.request.urlretrieve(url+filename, filepath)\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filepath)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filepath + '. Can you get to it with a browser?')\n return filepath", "def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(url, filename, prefix, num_bytes=None):\n local_filename = None\n if not os.path.exists(os.path.join(prefix, filename)):\n try:\n print(\"Downloading file {}...\".format(url + filename))\n with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:\n local_filename, _ = urlretrieve(url + filename, os.path.join(prefix, filename),\n _reporthook=_reporthook(t))\n except AttributeError as e:\n print(\"An error occurred when downloading the file! Please get the dataset using a browser.\")\n raise e\n # We have a downloaded file\n # Check the stats and make sure they are ok\n file_stats = os.stat(os.path.join(prefix, filename))\n if num_bytes is None or file_stats.st_size == num_bytes:\n print(\"File {} successfully loaded\".format(filename))\n else:\n raise Exception(\"Unexpected dataset size. Please get the dataset using a browser.\")\n\n return local_filename", "def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, filename,\n reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(filename)\n\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify ' + filename + \\\n '. Can you get to it with a browser?')\n return filename", "def _download(url, file_name):\n # File length can only be approximated from the resulting GET, unfortunately\n r = requests.get(url, stream=True)\n if 'Content-Length' in r.headers:\n file_len = int(r.headers['Content-Length'])\n elif 'X-Original-Content-Length' in r.headers:\n file_len = int(r.headers['X-Original-Content-Length'])\n else:\n file_len = 0\n r.raw.decode_content = True\n with open(file_name, 'wb') as f:\n _copyfileobj(r.raw, f, chunks=(file_len / (64. 
* 1024)))\n r.close()\n\n return file_name", "def maybe_download(filename, expected_bytes, force=False):\n dest_filename = os.path.join(data_root, filename)\n if force or not os.path.exists(dest_filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename", "def download_file(url, filename):\n with requests.get(url, stream=True) as res:\n if res.status_code == 200:\n with open(filename, 'wb') as f:\n for chunk in res.iter_content(chunk_size=8192): \n f.write(chunk)\n else:\n raise ValueError(\"{} {}\".format(res.status_code, url))\n return filename", "def download_matt_mahoney_text8(filename, expected_bytes):\n if not os.path.exists(filename):\n print('Downloading ...')\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes, force=False):\n if not P.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n filepath = P.join(DATA_DIR, filename)\n if force or not P.exists(filepath):\n print(\"Downloading %s, %s bytes...\" % (filename, sizeof_fmt(expected_bytes)))\n url = 'http://commondatastorage.googleapis.com/books1000/'\n urlretrieve(url + filename, filepath)\n\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')\n\n return filename", "def download_file(url: str) -> str:\n\n assert len(url) > 0\n\n filename = url.split('/')[-1]\n\n with open(filename, 'wb') as output_file:\n response = requests.get(url, stream=True)\n total = response.headers.get('content-length')\n\n if total is None:\n output_file.write(response.content)\n else:\n downloaded = 0\n total = int(total)\n for data in response.iter_content(chunk_size=max(int(total / 1000), 1024 * 1024)):\n downloaded += len(data)\n output_file.write(data)\n done = int(50 * downloaded / total)\n sys.stdout.write('\\r[{}{}]'.format('█' * done, '.' 
* (50 - done)))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n\n return filename", "def maybe_download(filename):\n\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.Size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath", "def maybe_download(filename, work_directory, source_url):\n\tif not gfile.Exists(work_directory):\n\t\tgfile.MakeDirs(work_directory)\n\tfilepath = os.path.join(work_directory, filename)\n\tif not gfile.Exists(filepath):\n\t\ttemp_file_name, _ = urlretrieve_with_retry(source_url)\n\t\tgfile.Copy(temp_file_name, filepath)\n\t\twith gfile.GFile(filepath) as f:\n\t\t\tsize = f.size()\n\t\tprint('Successfully downloaded', filename, size, 'bytes.')\n\treturn filepath", "def download_file(filename, url):\n block_size = 10240 * 1024 # 10 MB\n tmp_filename = filename + '.part'\n first_byte = os.path.getsize(tmp_filename) if os.path.exists(tmp_filename) else 0\n file_mode = 'ab' if first_byte else 'wb'\n file_size = int(requests.head(url).headers['Content-length'])\n headers = { \"Range\": \"bytes=%s-\" % first_byte }\n r = requests.get(url, headers=headers, stream=True)\n\n if os.path.getsize(filename) > file_size:\n return False\n\n print('Downloading: %s' % url)\n print('Starting download at %.0f MB' % (first_byte / 1e6))\n\n with open(tmp_filename, file_mode) as f:\n for chunk in r.iter_content(chunk_size=block_size):\n if chunk:\n # filter out keep-alive new chunks\n f.write(chunk)\n\n shutil.move(tmp_filename, filename)\n print(\"Saved: %s\" % filename)", "def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath", "def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath", "def getfile(url):\n try:\n return urlreq.urlopen(url)\n except urlreq.HTTPError as e:\n safeprint(\"Sever returned with response code \" + str(e.getcode()) + \", download failed.\")", "def download_file(url: str, fdst):\n split = urlsplit(url)\n filename = os.path.basename(split.path)\n\n print('Downloading {}'.format(filename))\n\n with urllib.request.urlopen(url) as response:\n length = response.getheader('content-length')\n if length:\n total = int(length)\n copyfileobj_with_progress(response, fdst, total=total)", "def download_file(filename, url):\n print(\"downloading {0}\".format(url))\n with open(filename, \"wb\") as fout:\n response = requests.get(url, stream=True, verify=False)\n response.raise_for_status()\n # Write response data to file\n iblock = 0\n for block in response.iter_content(4096):\n if iblock % 10000 == 0:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n iblock += 1\n fout.write(block)", "def anon_download(url: str):\n if 
verify(url):\n location = download(url)\n return location\n return 6", "def maybe_download(url, file_name, work_directory):\n\tif not os.path.exists(work_directory):\n\t\tos.mkdir(work_directory)\n\t\t\n\tfile_path = os.path.join(work_directory, file_name)\n\n\tif not os.path.exists(file_path):\n\t\tfile_path, _ = urllib.request.urlretrieve(url, file_path)\n\t\tstatinfo = os.stat(file_path)\n\t\tprint('Successfully downloaded', file_name, statinfo.st_size, 'bytes.')\n\t\n\tprint(\"{} existed\".format(file_path))\n\n\treturn file_path", "def download_file_(url, filename, cookie = cookie):\n opener = urllib.request.build_opener()\n opener.addheaders = [('Cookie', cookie)]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, filename)\n file_size = os.path.getsize(filename)/1048576\n #print(\"File Size is :\", file_size, \"MB\")\n return(file_size)", "def download_urllib(url, filename):\n print(\"Trying to Download via urllib from:\\n \", url)\n keep_going = True\n try:\n url_res = urllib2.urlopen(url)\n except (HTTPError, URLError, ssl.CertificateError) as err:\n print(\"Error: %s\" % err)\n return False\n with open(filename, 'wb') as outfile:\n block_sz = 8192\n meta = url_res.info()\n meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all\n meta_length = meta_func(\"Content-Length\")\n file_size = None\n if meta_length:\n file_size = int(meta_length[0])\n message = \"Downloading: {0}\\nBytes: {1}\\n\".format(url, file_size)\n dstyle = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE\n if file_size:\n progress = wx.ProgressDialog('Downloading', message,\n maximum=1+file_size/block_sz, style=dstyle)\n else:\n progress = wx.ProgressDialog('Downloading', message, style=dstyle)\n\n file_size_dl = 0\n while keep_going:\n read_buffer = url_res.read(block_sz)\n if not read_buffer:\n progress.Update(file_size_dl / block_sz, \"message+\\nDONE!\")\n wx.Sleep(0.2)\n break\n\n file_size_dl += len(read_buffer)\n outfile.write(read_buffer)\n\n status = \"{0:16}\".format(file_size_dl)\n if file_size:\n status += \" [{0:6.2f}%]\".format(file_size_dl * 100 / file_size)\n (keep_going, dummy_skip) = progress.Update(file_size_dl / block_sz,\n message+status)\n wx.Sleep(0.08) # Give the GUI some update time\n progress.Destroy()\n \n result = os.path.exists(filename) and os.stat(filename).st_size > 0\n return result", "def download(url, filename, delete_if_interrupted=True, chunk_size=4096):\n try:\n with open(filename, \"wb\") as f:\n print(\"Downloading {} > {}\".format(url, filename))\n response = requests.get(url, stream=True)\n total_length = response.headers.get('content-length')\n\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n total_length = int(total_length)\n with tqdm(total=total_length) as progressbar:\n for data in response.iter_content(chunk_size=chunk_size):\n if data: # filter-out keep-alive chunks\n f.write(data)\n progressbar.update(len(data))\n except Exception as e:\n if delete_if_interrupted:\n print(\"Removing incomplete download {}.\".format(filename))\n os.remove(filename)\n raise e\n return filename", "def download_file_from_url(\n url: str, filename: Path, show_progress: bool = True\n) -> bool:\n try:\n response = requests.get(url, stream=True)\n response.raise_for_status()\n\n except requests.exceptions.Timeout:\n logger.critical(f\"Was unable to get the download url: {url}. 
Timeout Error.\")\n return False\n except requests.exceptions.HTTPError as exc:\n logger.critical(\n f\"HTTP Error ({exc.response.status_code}): downloading file from {url}\"\n )\n return False\n\n total_size_in_bytes = int(response.headers.get(\"content-length\", 0))\n\n if total_size_in_bytes == 0:\n logger.critical(f\"content-length is 0 and it should not be\")\n return False\n\n total_downloaded = 0\n block_size = 8092 # 8K blocks might want to tune this.\n\n progress_bar = (\n tqdm(\n desc=f\"{str(filename) : <45}\",\n total=total_size_in_bytes,\n unit=\"iB\",\n unit_scale=True,\n bar_format=\"{l_bar:45}{bar:35}{r_bar}{bar:-10b}\",\n )\n if show_progress\n else InvisibleProgress()\n )\n\n # if the file name contains '/', create subdirectories and download there\n ensure_dirpath_exists(Path(os.path.dirname(filename)))\n\n try:\n with open(filename, \"wb\") as file:\n for data in response.iter_content(block_size):\n progress_bar.update(len(data))\n total_downloaded += len(data)\n file.write(data)\n except IOError as ex:\n logger.critical(f\"IOError opening {filename} for writing: {ex}\")\n return False\n\n if total_downloaded != total_size_in_bytes:\n logger.critical(\n f\"Error in downloading {filename}: expected {total_size_in_bytes} bytes, downloaded {total_downloaded} bytes\"\n )\n return False\n return True", "def _download_file(file_url: str, file_path: str) -> str:\n if os.path.exists(file_path):\n return file_path\n op_desc = f\"Downloading {os.path.basename(file_path)}\"\n try:\n with requests.Session() as req_sess:\n req_res = req_sess.get(file_url, stream=True)\n total_length = int(req_res.headers.get(\"Content-Length\"))\n with tqdm.wrapattr(req_res.raw, \"read\", total=total_length, desc=op_desc) as raw:\n with open(file_path , \"wb\") as file:\n shutil.copyfileobj(raw,file)\n return file_path\n except Exception as network_error:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise network_error", "def normal_download(url, directory):\n original_name = url.split('/')[-1]\n tmp_file_name = directory + \"/\" + original_name + \"_tmp2\"\n file_name = directory + \"/\" + original_name\n\n file_size = int(requests.head(url).headers['Content-Length'])\n logging.debug('%s file size:%s' % (original_name, file_size))\n\n try:\n urlretrieve(url, tmp_file_name, download_callback)\n except Exception as e:\n logging.error(e)\n return 1\n\n current_file_size = os.path.getsize(tmp_file_name)\n if current_file_size != file_size:\n logging.error(\"download failed,file size not match, original is %d, %d downloaded\"\n % (file_size, current_file_size))\n ret = 1\n return ret\n\n # remove the file if exists\n if os.path.exists(file_name):\n os.remove(file_name)", "def _download_file(url: str, output_path: str):\n\n def write_to_file(response: requests.Response, output_path: str) -> int:\n \"\"\"Write the response content to the given file.\n\n :param response: Response to be written to the output file.\n :param output_path: Path to the output file.\n :returns: Number of bytes read from the response content.\n \"\"\"\n read_bytes = 0\n with open(output_path, \"wb\") as output_file:\n # Use the same chunk size of `urlretrieve`\n for chunk in response.iter_content(chunk_size=1024 * 8):\n read_bytes += len(chunk)\n output_file.write(chunk)\n if read_bytes > FETCHER_MAXIMUM_FILE_SIZE:\n break\n return read_bytes\n\n try:\n with requests.get(\n url, stream=True, timeout=FETCHER_REQUEST_TIMEOUT\n ) as response:\n response.raise_for_status()\n\n content_length = 
int(response.headers.get(\"Content-Length\", 0))\n if content_length > FETCHER_MAXIMUM_FILE_SIZE:\n raise REANAFetcherError(\"Maximum file size exceeded\")\n\n read_bytes = write_to_file(response, output_path)\n\n if read_bytes > FETCHER_MAXIMUM_FILE_SIZE:\n os.remove(output_path)\n raise REANAFetcherError(\"Maximum file size exceeded\")\n except HTTPError as e:\n error = f\"Cannot fetch the workflow specification: {e.response.reason} ({response.status_code})\"\n if response.status_code == 404:\n error = \"Cannot find the given workflow specification\"\n raise REANAFetcherError(error)\n except Timeout:\n raise REANAFetcherError(\n \"Timed-out while fetching the workflow specification\"\n )\n except RequestException:\n raise REANAFetcherError(\n \"Something went wrong while fetching the workflow specification\"\n )", "def download(url: str, filename: str):\n resp = requests.get(url, stream=True)\n total = int(resp.headers.get(\"content-length\", 0))\n file = open(os.path.join(DATA_DIR, filename), \"wb\")\n bar = tqdm(\n desc=filename,\n total=total,\n unit=\"iB\",\n unit_scale=True,\n unit_divisor=1024,\n )\n for data in resp.iter_content(chunk_size=1024):\n size = file.write(data)\n bar.update(size)\n file.close()\n return True", "def download_file(url, file_name):\n conn = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n with conn.request('GET', url, preload_content=False) as resp, open(file_name, 'wb') as out:\n shutil.copyfileobj(resp, out)", "def _maybe_download(self, url):\n filename = os.path.basename(url)\n download_path = os.path.join(self._model_dir, filename)\n if os.path.exists(download_path):\n return download_path\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n urllib.request.urlretrieve(url, download_path, _progress)\n statinfo = os.stat(download_path)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n return download_path", "def download_file(url, path, filesize):\n # Check if the file already exists, and don't download it if it does.\n if os.path.exists(path):\n # Make sure that we have a full download\n if os.path.getsize(path) == filesize:\n print(\"Skipping %s, already exists.\" % os.path.basename(path))\n return\n else:\n print(\"Found incomplete download of %s, retrying.\" %\n os.path.basename(path))\n os.remove(path)\n\n class TqdmUpTo(tqdm):\n \"\"\"Provides `update_to(n)` which uses `tqdm.update(delta_n)`.\"\"\"\n def update_to(self, b=1, bsize=1, tsize=None):\n \"\"\"\n b : int, optional\n Number of blocks transferred so far [default: 1].\n bsize : int, optional\n Size of each block (in tqdm units) [default: 1].\n tsize : int, optional\n Total size (in tqdm units). 
If [default: None] remains unchanged.\n \"\"\"\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n) # will also set self.n = b * bsize\n\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=url.split('/')[-1]) as t: # all optional kwargs\n urlretrieve(url, filename=path, reporthook=t.update_to, data=None)", "def download_from_url(url, path):\n\n with open(path, \"wb\") as f:\n response = requests.get(url, stream=True)\n total_length = response.headers.get('content-length')\n\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n dl = 0\n total_length = int(total_length)\n for data in response.iter_content(chunk_size=4096):\n dl += len(data)\n f.write(data)\n done = int(50 * dl / total_length)\n sys.stdout.write(\"\\r[%s%s] %s%%\" % ('=' * done, ' ' * (50 - done), done * 2))\n sys.stdout.flush()", "def maybe_download(filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urlretrieve(SOURCE_URL + filename, filepath)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n return filepath", "def download_file(url, filename):\n\n with DownloadProgressBar(unit=\"B\",\n unit_scale=True,\n miniters=1,\n desc=url.split(\"/\")[-1]\n ) as t:\n urllib.request.urlretrieve(url, filename=filename, reporthook=t.update_to)", "def download_file (url):\n\n '''\n Try and download the file given in the url,\n throw up an error if not possible.\n '''\n try:\n ret = urllib2.urlopen (url)\n except urllib2.HTTPError:\n return None\n except urllib2.URLError:\n return None\n\n print \"Downloaded \" + url\n\n return ret", "def download_file(filename, url):\n with open(filename, 'wb') as fout:\n response = requests.get(url, stream=True)\n response.raise_for_status()\n # Write response data to file\n for block in response.iter_content(4096):\n fout.write(block)", "def download_file(url, outputfile):\r\n try:\r\n req = requests.get(url, stream=True, timeout=120)\r\n try:\r\n with open(outputfile, 'wb') as file_download:\r\n for chunk in req.iter_content(chunk_size=1024): \r\n if chunk: \r\n file_download.write(chunk)\r\n except IOError as error:\r\n print error\r\n except requests.exceptions.RequestException as err:\r\n print err\r\n except socket.error as err:\r\n print err\r\n return None", "def t_getfile(self, link, filename, session):\n\n self.sema.acquire()\n\n filepath = os.path.join(os.getcwd() + '/Downloads/' + str(filename))\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n if not os.path.isfile(filepath):\n self.download_new_file(link, filepath, session)\n else:\n\n current_bytes = os.stat(filepath).st_size\n\n headers = requests.head(link).headers\n\n print(headers)\n if 'content-length' not in headers:\n print(f\"server doesn't support content-length for {link}\")\n self.sema.release()\n return\n\n total_bytes = int(requests.head(link).headers['content-length'])\n\n print(total_bytes)\n\n if current_bytes < total_bytes:\n #\n self.continue_file_download(link, filepath, session, current_bytes, total_bytes)\n print(f\"Current byte < total - remaining {total_bytes - current_bytes}\")\n else:\n print(f\"already done: {filename}\")\n\n self.sema.release()", "def url_retrieve(url, output_file):\n r = requests.get(url, allow_redirects=True)\n if r.status_code != 200:\n raise ConnectionError(f\"Could not download {url}\\nError code: 
{r.status_code}\")\n\n output_file.write_bytes(r.content)", "def download(\n images_url,\n filename,\n buffer_size=_DEFAULT_BUFFER_SIZE,\n print_progress=False,\n download_limit=None\n ):\n download_limit = download_limit or _DEFAULT_DOWNLOAD_LIMIT\n log(\"TRACE\", \"Downloading {} to {}\".format(images_url, filename))\n try:\n resp = requests.get(images_url, stream=True, proxies=_PROXIES,\n headers={'User-Agent': 'UHD Images Downloader'})\n except TypeError:\n # requests library versions pre-4c3b9df6091b65d8c72763222bd5fdefb7231149\n # (Dec.'12) workaround\n resp = requests.get(images_url, prefetch=False, proxies=_PROXIES,\n headers={'User-Agent': 'UHD Images Downloader'})\n if resp.status_code != 200:\n raise RuntimeError(\"URL does not exist: {}\".format(images_url))\n filesize = float(resp.headers['content-length'])\n if filesize > download_limit:\n if not ask_permission(\n \"The file size for this target ({:.1f} MiB) exceeds the \"\n \"download limit ({:.1f} MiB). Continue downloading?\".format(\n filesize/1024**2, download_limit/1024**2)):\n return 0, 0, \"\"\n filesize_dl = 0\n base_filename = os.path.basename(filename)\n if print_progress and not sys.stdout.isatty():\n print_progress = False\n log(\"INFO\", \"Downloading {}, total size: {} kB\".format(\n base_filename, filesize/1000))\n with open(filename, \"wb\") as temp_file:\n sha256_sum = hashlib.sha256()\n for buff in resp.iter_content(chunk_size=buffer_size):\n if buff:\n temp_file.write(buff)\n filesize_dl += len(buff)\n sha256_sum.update(buff)\n if print_progress:\n status = r\"%05d kB / %05d kB (%03d%%) %s\" % (\n int(math.ceil(filesize_dl / 1000.)), int(math.ceil(filesize / 1000.)),\n int(math.ceil(filesize_dl * 100.) / filesize),\n base_filename)\n if os.name == \"nt\":\n status += chr(8) * (len(status) + 1)\n else:\n sys.stdout.write(\"\\x1b[2K\\r\") # Clear previous line\n sys.stdout.write(status)\n sys.stdout.flush()\n if print_progress:\n print('')\n return filesize, filesize_dl, sha256_sum.hexdigest()", "def maybe_download(filepath, url):\n\n if os.path.exists(filepath):\n logger.info(\"Not downloading, file already found: %s\" % filepath)\n return filepath\n\n logger.info(\"Downloading %s to %s\" % (url, filepath))\n try:\n tf.gfile.Copy(url, filepath)\n except tf.errors.UnimplementedError:\n try:\n inprogress_filepath = filepath + \".incomplete\"\n # r = requests.get(url)\n # with open(inprogress_filepath, 'wb') as outfile:\n # outfile.write(r.content)\n\n inprogress_filepath, _ = urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress\n print()\n os.rename(inprogress_filepath, filepath)\n except HTTPError:\n if url.startswith(\"http\"):\n os.system('wget --no-check-certificat ' + url+\" -O \"+filepath.replace(\" \", \"\\ \"))\n\n else:\n raise ValueError(\"Unrecognized URI: \" + filepath)\n statinfo = os.stat(filepath)\n logger.info(\"Successfully downloaded %s, %s bytes.\" %\n (os.path.basename(filepath), statinfo.st_size))\n return filepath", "def download_data(url, filename, dst_dir):\r\n fullpath = os.path.join(dst_dir, filename)\r\n if os.path.exists(fullpath):\r\n return\r\n\r\n # Try to open url\r\n try:\r\n page = urlopen(url)\r\n except Exception:\r\n shutil.copy(PLACEHOLDER, fullpath)\r\n return\r\n\r\n f = open(fullpath, 'wb')\r\n while True:\r\n buff = page.read(BLOCK_SZ)\r\n if not buff:\r\n break\r\n f.write(buff)\r\n f.close()\r\n pass", "def download_file(self, url, filename):\n r = requests.get(url, 
stream=True)\n r.raise_for_status()\n\n with open(filename, 'wb') as f:\n for chunk in r.iter_content():\n if chunk:\n f.write(chunk)\n f.flush()", "def urlretrieve(url, filename, reporthook=None, data=None):\n\n def chunk_read(response, chunk_size=8192, reporthook=None):\n content_type = response.info().get(\"Content-Length\")\n total_size = -1\n if content_type is not None:\n total_size = int(content_type.strip())\n count = 0\n while True:\n chunk = response.read(chunk_size)\n count += 1\n if reporthook is not None:\n reporthook(count, chunk_size, total_size)\n if chunk:\n yield chunk\n else:\n break\n\n response = urlopen(url, data)\n with open(filename, \"wb\") as fd:\n for chunk in chunk_read(response, reporthook=reporthook):\n fd.write(chunk)", "def urlretrieve(url, filename, reporthook=None, data=None):\n\n def chunk_read(response, chunk_size=8192, reporthook=None):\n content_type = response.info().get('Content-Length')\n total_size = -1\n if content_type is not None:\n total_size = int(content_type.strip())\n count = 0\n while True:\n chunk = response.read(chunk_size)\n count += 1\n if reporthook is not None:\n reporthook(count, chunk_size, total_size)\n if chunk:\n yield chunk\n else:\n break\n\n response = urlopen(url, data)\n with open(filename, 'wb') as fd:\n for chunk in chunk_read(response, reporthook=reporthook):\n fd.write(chunk)", "def download_url(url, destination_filename=None, progress_updater=None,\\\n force_download=False, quiet=True):\n \n # This is not intended to guarantee uniqueness, we just know it happens to guarantee\n # uniqueness for this application.\n if destination_filename is None:\n url_as_filename = url.replace('://', '_').replace('/', '_') \n destination_filename = \\\n os.path.join(temp_dir,url_as_filename)\n if (not force_download) and (os.path.isfile(destination_filename)):\n if not quiet:\n print('Bypassing download of already-downloaded file {}'.format(os.path.basename(url)))\n return destination_filename\n print('Downloading file {} to {}'.format(os.path.basename(url),destination_filename),end='')\n urllib.request.urlretrieve(url, destination_filename, progress_updater) \n assert(os.path.isfile(destination_filename))\n nBytes = os.path.getsize(destination_filename)\n print('...done, {} bytes.'.format(nBytes))\n return destination_filename", "def download(self, url, filename):\n print(\"url\", url)\n print(\"filename\", filename)\n # open in binary mode\n with open(filename, \"wb\") as file:\n # get request\n try:\n r = requests.get(url)\n if r.status_code == 404:\n raise NotFoundException(\n \"URL: \", url, \" is not working. 
Status code 404\")\n # write to file\n file.write(r.content)\n print(\"file downloaded\")\n except ConnectionError as ex:\n print(ex)\n except NotFoundException as ex:\n print(ex)\n except Exception as ex:\n print(ex)", "def urlretrieve(url, filename, reporthook=None, data=None):\n\n def chunk_read(response, chunk_size=8192, reporthook=None):\n content_type = response.info().get('Content-Length')\n total_size = -1\n if content_type is not None:\n total_size = int(content_type.strip())\n count = 0\n while True:\n chunk = response.read(chunk_size)\n count += 1\n if reporthook is not None:\n reporthook(count, chunk_size, total_size)\n if chunk:\n yield chunk\n else:\n break\n\n response = urlopen(url, data)\n with open(filename, 'wb') as fd:\n for chunk in chunk_read(response, reporthook=reporthook):\n fd.write(chunk)", "def _download_epw_file(url):\n r = requests.get(url)\n if r.ok:\n # py2 and 3 compatible: binary write, encode text first\n log.debug(\" ... OK!\")\n return io.StringIO(r.text)\n else:\n log.error(\" connection error status code: %s\" % r.status_code)\n r.raise_for_status()", "def download(filename, work_directory, source_url, overwrite=False):\n\n if not gfile.Exists(work_directory):\n gfile.MakeDirs(work_directory)\n\n filepath = os.path.join(work_directory, filename)\n\n if overwrite or not gfile.Exists(filepath):\n _filename, _ = urlretrieve_with_retry(source_url + filename)\n #print('_filename:', _filename)\n gfile.Copy(_filename, filepath, overwrite=overwrite)\n with gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n\n return filepath", "def download_file(url, filename, callback=None, headers=None, force_filename=False, allow_compression=True):\n url = str(url)\n filename = str(filename)\n if headers:\n for key, value in headers.items():\n headers[str(key)] = str(value)\n\n if allow_compression:\n if not headers:\n headers = {}\n headers[\"accept-encoding\"] = \"deflate, gzip, x-gzip\"\n\n scheme, host, port, path = client._parse(url)\n factory = HTTPDownloader(url, filename, callback, headers, force_filename, allow_compression)\n if scheme == \"https\":\n from twisted.internet import ssl\n reactor.connectSSL(host, port, factory, ssl.ClientContextFactory())\n else:\n reactor.connectTCP(host, port, factory)\n\n return factory.deferred", "def download_from_url(path, url):\n filename = url.split(\"/\")[-1]\n found_file = find_file(path, filename, max_depth=0)\n if found_file is None:\n filename = os.path.join(path, filename)\n logging.info(\"Downloading from %s to %s.\" % (url, filename))\n inprogress_filepath = filename + \".incomplete\"\n inprogress_filepath, _ = urllib.request.urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress.\n print()\n tf.gfile.Rename(inprogress_filepath, filename)\n return filename\n else:\n logging.info(\"Already downloaded: %s (at %s).\" % (url, found_file))\n return found_file", "def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath", "def _check_url_file (url, path_download, outfile) :\n 
if \"http://\" in url.lower () :\n dest = outfile if outfile != None else _get_file_url (url, path_download)\n down = False\n nyet = dest + \".notyet\"\n \n if os.path.exists (dest) and not os.path.exists (nyet) :\n try :\n fLOG(\"trying to connect\", url)\n f1 = urllib.urlopen (url)\n down = _first_more_recent (f1, dest)\n newdate = down\n f1.close ()\n except IOError :\n fLOG(\"unable to connect Internet, working offline for url\", url)\n down = False\n else : \n down = True\n newdate = False\n \n if down :\n if newdate : fLOG (\" downloading (updated) \", url)\n else : fLOG (\" downloading \", url)\n \n if len (url) > 4 and url [-4].lower () in [\".txt\", \".csv\", \".tsv\", \".log\"] :\n fLOG (\"creating text file \", dest)\n format = \"w\"\n else : \n fLOG (\"creating binary file \", dest)\n format = \"wb\"\n \n if os.path.exists (nyet) :\n size = os.stat (dest).st_size\n fLOG (\"resume downloading (stop at\", size, \") from \", url)\n request = urllib.request.Request(url) \n request.add_header(\"Range\", \"bytes=%d-\" % size)\n fu = urllib.request.urlopen (request) \n f = open (dest, format.replace (\"w\", \"a\"))\n else :\n fLOG (\"downloading \", url)\n request = urllib.request.Request(url) \n fu = urllib.request.urlopen (url)\n f = open (dest, format)\n \n open (nyet, \"w\").close ()\n c = fu.read (2**21)\n size = 0\n while len (c) > 0 :\n size += len (c)\n fLOG(\" size\", size)\n f.write (c)\n f.flush ()\n c = fu.read (2**21)\n fLOG (\"end downloading\")\n f.close ()\n fu.close ()\n os.remove (nyet)\n \n url = dest\n return url", "def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Successfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath", "def download_to_file(url, filename):\n with browser_spoof_open(url) as download_conn:\n with open(filename, \"wb\") as out_file:\n shutil.copyfileobj(download_conn, out_file, 1024 * 8)", "def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath", "def download_file(url, dl_filename):\r\n print( url )\r\n url_object=urlopen(url)\r\n dl_file_object=open(dl_filename,'wb')\r\n meta = url_object.info()\r\n file_size = 0\r\n if int(meta.get(\"Content-Length\", -1)) > 0:\r\n file_size = int(meta.get(\"Content-Length\", -1))\r\n if file_size == 0:\r\n print( \"Downloading: %s\" % (dl_filename.split('/')[-1]) )\r\n else:\r\n print( \"Downloading: %s Bytes: %s\" % (dl_filename.split('/')[-1], file_size) )\r\n\r\n current_file_size = 0\r\n block_size = 8192\r\n pbar = tqdm(\r\n total=file_size, initial=0, \r\n unit='B', unit_scale=True, desc=dl_filename.split('/')[-1] \r\n )\r\n while True:\r\n buffer = url_object.read(block_size)\r\n if not buffer:\r\n break\r\n current_file_size += len(buffer)\r\n dl_file_object.write(buffer)\r\n pbar.update(block_size)\r\n pbar.close()\r\n dl_file_object.close()", 
"def web_get_file(self, url):\n try:\n print(url)\n response = requests.get(url, verify=False)\n file_buffer = BytesIO(response.content)\n file_buffer.seek(0)\n return file_buffer\n except:\n print(traceback.print_exc())\n return None", "def download_file(target_url):\n resp = requests.get(target_url)\n with tempfile.NamedTemporaryFile('wb+', delete=False) as f:\n file_name = f.name\n f.write(resp.content)\n return file_name", "def robust_download(url, filename, tries=3, pause=0.1, timeout=30, verbose=True):\n\n if verbose:\n print('trying to download %s to file://%s' % (url, filename))\n\n @retry(tries=tries, pause=pause)\n @timelimit(timeout)\n def _download():\n with open(filename, 'wb') as f:\n [code, _, contents] = urlread(url)\n assert code == 200\n f.write(contents)\n return filename\n\n result = None\n with ignore_error():\n result =_download()\n\n # delete file on failure\n if not result:\n if verbose: print(' failed to download')\n if os.path.exists(filename):\n if verbose: print(' deleting file')\n os.remove(filename)\n return\n else:\n if verbose: print(' download successful')\n return filename", "def download(url, filename=None):\n\t# requirements os, shutil, urllib.parse, urllib.request\n\tif not filename:\n\t\turl_parts = urllib.parse.urlparse(url)\n\t\tfilename = os.path.basename(url_parts.path)\n\turl_h = urllib.request.urlopen(url)\n\twith open(filename, 'wb') as file_h:\n\t\tshutil.copyfileobj(url_h, file_h)\n\turl_h.close()\n\treturn", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def get_file_size(url: str):\n header = requests.head(url).headers\n if \"Content-Length\" in header and header[\"Content-Length\"] != 0:\n return int(header[\"Content-Length\"])\n elif \"Location\" in header:\n h = requests.head(header[\"Location\"]).headers\n return int(h.get(\"Content-Length\", 0))\n else:\n return 0", "def download_if_needed(url, filename):\n if os.path.exists(filename):\n print \"already exists\"\n else:\n wget.download(url)", "def download(url, filename):\n response = requests.get(url, stream=True)\n with open(filename, \"wb\") as handle:\n for data in response.iter_content():\n handle.write(data)", "def get_file(url, file_name=None):\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".jhML\")\n\n if file_name is None:\n file_name = url[url.rfind('/') + 1:]\n file_path = os.path.join(cache_dir, file_name)\n\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n\n if os.path.exists(file_path):\n return file_path\n\n print(\"Downloading: \" + file_name)\n try:\n urllib.request.urlretrieve(url, file_path, show_progress)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise\n print(\" Done\")\n\n return file_path", "def download_file(url, fname):\n urllib.request.urlretrieve(url, fname)", "def perform_download(url, outdir=None):\n if outdir is None:\n outdir = os.getcwd()\n\n direct_link_path = urlparse(url).path\n path_parts = direct_link_path.split('/')\n file_name = path_parts[-1]\n\n output_full_path = os.path.join(outdir, file_name)\n\n r = requests.get(url, stream=True)\n \n file_size = int(r.headers[\"Content-Length\"])\n \n print(\"Starting download of {0} to {1} (file size = {2} bytes)\".format(file_name, output_full_path, file_size))\n \n output_file = open(output_full_path, 'wb')\n \n counter = 0\n chunksize = 1024\n 
previousPerCent = 0\n\n sys.stdout.write(\n '\\n\\r0% 0/{0}'.format(file_size)\n )\n sys.stdout.flush()\n\n for chunk in r.iter_content(chunk_size=chunksize):\n if chunk:\n output_file.write(chunk)\n output_file.flush()\n \n currentPercent = int((counter * chunksize) * 100 / file_size)\n\n if currentPercent > previousPerCent:\n previousPerCent = currentPercent\n \n sys.stdout.write(\n '\\r{0}% {1}/{2}'.format(currentPercent, counter * chunksize, file_size)\n )\n sys.stdout.flush()\n \n counter += 1\n\n output_file.close()\n\n sys.stdout.write('\\r100% {0}/{1}\\n'.format(file_size, file_size))\n\n print('\\nCompleted downloading to {0}\\n'.format(output_full_path))", "def download_file(url, local_filename):\n response = requests.get(url, stream=True)\n with open(local_filename, \"wb\") as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n outfile.write(chunk)", "def download(url, target):\n # Add progress bar via:\n # http://stackoverflow.com/a/22776/317916\n if not url:\n return None\n urlretrieve(url, target)\n return target", "def download(url, fpath):\n with open(fpath, 'wb') as f:\n try:\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n f_size = 0\n for chunk in r.iter_content(chunk_size=8192):\n if chunk:\n f.write(chunk)\n f_size += len(chunk)\n f_hash = get_hash_memory_optimized(fpath)\n return f_size, f_hash\n except Exception as e:\n os.remove(fpath)\n raise AdvarchsDownloadException('Could not download file.' + e.message)", "def download(url, server_fname, local_fname=None, progress_update_percentage=5):\n try:\n import urllib\n urllib.urlretrieve('http://google.com')\n except AttributeError:\n import urllib.request as urllib\n u = urllib.urlopen(url)\n if local_fname is None:\n local_fname = server_fname\n full_path = local_fname\n meta = u.info()\n with open(full_path, 'wb') as f:\n try:\n file_size = int(meta.get(\"Content-Length\"))\n except TypeError:\n print(\"WARNING: Cannot get file size, displaying bytes instead!\")\n file_size = 100\n print(\"Downloading: %s Bytes: %s\" % (server_fname, file_size))\n file_size_dl = 0\n block_sz = int(1E7)\n p = 0\n while True:\n buffer = u.read(block_sz)\n if not buffer:\n break\n file_size_dl += len(buffer)\n f.write(buffer)\n if (file_size_dl * 100. / file_size) > p:\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl *\n 100. 
/ file_size)\n print(status)\n p += progress_update_percentage", "def download_file_nowget(url, fn, cookiejar):\n\tprint \"Downloading %s -> %s\" % (url, fn)\n\turlfile = get_opener(cookiejar).open(url)\n\tchunk_sz = 1048576\n\tbytesread = 0\n\tf = open(fn, \"wb\")\n\n\twhile True:\n\t\tdata = urlfile.read(chunk_sz)\n\t\tif not data:\n\t\t\tprint \".\"\n\t\t\tbreak\n\n\t\tf.write(data)\n\t\tbytesread += len(data)\n\t\tprint \"\\r%d bytes read\" % bytesread,\n\t\tsys.stdout.flush()", "def _download(self, url, output_dir, dataset, chunk_size=1024):\n r = self.session.get(url, stream=True, allow_redirects=True)\n if not r.ok:\n r = self.session.get(r.url, stream=True, allow_redirects=True, auth=(self._username, self._password))\n file_size = int(r.headers['Content-Length'])\n\n with tqdm(total=file_size, unit_scale=True, unit='B', unit_divisor=1024) as pbar:\n ### GET FILE NAME ###\n if \"Content-Disposition\" in r.headers.keys():\n local_filename = re.findall(\"filename=(.+)\", r.headers[\"Content-Disposition\"])[0]\n else:\n local_filename = url.split(\"/\")[-3]\n local_filename = self.api.lookup(dataset, local_filename)[0]\n local_filename = local_filename + util.convert_to_extension(r.headers['content-type'])\n print(\"*** FNAME\", local_filename)\n\n local_filename = os.path.join(output_dir, local_filename)\n\n ### WRITE FILE ###\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk:\n f.write(chunk)\n pbar.update(chunk_size)\n return local_filename", "def download_pdf( url, filename = None ):\n r = urlopen( Request( url ) )\n try:\n if filename is None:\n filename = give_filename( url )\n with open( filename, 'wb' ) as f:\n shutil.copyfileobj( r, f )\n finally:\n r.close()", "def _download(url):\n \n filename = url.split('/')[-1]\n if os.path.isfile(filename):\n info('Using pre-existed file {} from local system.'.format(filename))\n else:\n info('Downloading {} from OMA Database.'.format(url.split('/')[-1]))\n filename, _ = urlretrieve(url, filename)\n return filename", "def download_file(url, fname_out=None) -> None:\n\n import ssl\n\n try:\n with urllib.request.urlopen(url) as f:\n if not fname_out:\n return f.read().decode(\"utf-8\")\n else:\n fdir = os.path.dirname(fname_out)\n if not os.path.exists(fdir):\n os.makedirs(fdir)\n\n with open(fname_out, \"wb\") as outfile:\n outfile.write(f.read())\n return fname_out\n\n except ssl.SSLError:\n print(\"WHAT!\")\n sys.exit(1)", "def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename", "def download(self, url: str) -> BytesIO:\n\n response = self.dwdfs.open(url, block_size=0)\n total = self.dwdfs.size(url)\n\n buffer = BytesIO()\n\n tqdm_out = TqdmToLogger(log, level=logging.INFO)\n\n with tqdm(\n desc=url,\n total=total,\n unit=\"iB\",\n unit_scale=True,\n unit_divisor=1024,\n file=tqdm_out,\n ) as bar:\n for data in read_in_chunks(response, chunk_size=1024):\n size = buffer.write(data)\n bar.update(size)\n\n return buffer", "def download (httpfile, path_unzip = None, outfile = None) :\n if path_unzip is None : path_unzip = GetPath 
()\n file = _check_source (httpfile, path_unzip = path_unzip, outfile = outfile)\n return file", "def direct_download(url, progress_updater, headers=None):\n log_info(\"Directly Download with URL {0} ...\".format(url))\n\n if headers is None:\n headers = {'Host': urlparse(url).netloc,\n 'User-Agent': ukconfig.USER_AGENT,\n 'Connection': 'Keep-Alive'\n }\n\n # for test and cmd tools only\n if ukconfig.download_method == 'wget':\n data = wget_download(url, progress_updater, headers)\n else:\n data = requests_download(url, progress_updater, headers)\n if len(data) < ukconfig.FILE_SIZE_MINIMUM:\n raise FileCorrupted(\"File too small: \" + parse_file_size(len(data)))\n return data", "def download_file(url, dest=None, force=False, trusted=False):\n url, filename = get_save_path(url, dest, force)\n keep_going = True\n success = False\n if url is None:\n return 'Aborted!'\n\n if url:\n success = download_wget(url, filename, trusted) # Try wget\n if not success:\n success = download_urllib(url, filename) # Try urllib\n if not success:\n success = download_pip(url, filename, force, trusted) # Try urllib\n if not success:\n split_url = url.split('/')\n msg = '\\n'.join([\n \"\\n\\nERROR in Web Access! - You may be behind a firewall!\",\n \"-\" * 52,\n \"You should be able to bybass this by using a browser to download:\",\n \"\\t%s\\nfrom:\\t%s\\nthen copying the download file to:\\n\\t%s\" % (\n split_url[-1], '/'.join(split_url[:-1]), filename),\n ])\n print(msg, '\\n')\n wx.MessageBox(msg, caption='WDOWNLOAD ERROR!',\n style=wx.OK|wx.CENTRE|wx.ICON_ERROR)\n return \"FAILURE or Abort!\"\n\n return filename", "def _download_if_needed(file_path, url, show_progress):\n if file_path.exists() and not file_path.is_file():\n raise NotAFileError(file_path)\n elif not file_path.exists():\n get_logger().info('Downloading %s ...', file_path)\n reporthook = None\n if show_progress:\n reporthook = _UrlRetrieveReportHook()\n urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)\n if show_progress:\n print()\n else:\n get_logger().info('%s already exists. 
Skipping download.', file_path)", "def __download_file(self, filename):\r\n \r\n respons = requests.get(self.__url + filename, stream=True)\r\n save_filename = os.path.join(self.__folder, os.path.basename(filename))\r\n with open(save_filename, 'wb') as output_file:\r\n for chunk in respons.iter_content(chunk_size=128):\r\n output_file.write(chunk)", "def download_file(url,file_name):\n #http://stackabuse.com/download-files-with-python/\n filedata = urllib2.urlopen(url)\n datatowrite = filedata.read()\n with open(file_name, 'wb') as f:\n f.write(datatowrite)", "def __getFile_httplib(self, _src, _dst):\n\n #-------------------- \n # Pre-download callbacks\n #-------------------- \n self.runEventCallbacks('downloadStarted', _src, -1)\n self.runEventCallbacks('downloading', _src, 0)\n\n\n\n #-------------------- \n # Download\n #-------------------- \n response = self.__httpsRequest('GET', _src)\n data = response.read() \n with open(_dst, 'wb') as f:\n f.write(data) \n\n\n\n #-------------------- \n # Post-download callbacks\n #-------------------- \n self.removeFromDownloadQueue(_src)\n self.runEventCallbacks('downloadFinished', _src)", "def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)", "def _maybe_download(self, filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n return filepath", "def download_file(file_link, file_path):\n if not os.path.exists(file_path):\n with open(file_path, \"wb\") as f:\n print(\"\\nDownloading %s\" % file_path)\n response = requests.get(file_link, stream=True)\n total_length = response.headers.get('content-length')\n\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n dl = 0\n total_length = int(total_length)\n for data in response.iter_content(chunk_size=4096):\n dl += len(data)\n f.write(data)\n done = int(50 * dl / total_length)\n sys.stdout.write(\"\\r[%s%s]\" % ('=' * done, ' ' * (50 - done)))\n sys.stdout.flush()", "def _Download(url):\n response = urllib2.urlopen(url)\n if response.code != 200:\n raise RuntimeError('Failed to download \"%s\".' % url)\n return response.read()" ]
[ "0.81564283", "0.81564283", "0.81564283", "0.79952234", "0.7990289", "0.79747564", "0.7969037", "0.7969037", "0.7902681", "0.7736243", "0.761421", "0.7601226", "0.7358599", "0.7322421", "0.7298379", "0.7141095", "0.71260726", "0.6950416", "0.6945195", "0.6897762", "0.6878477", "0.68499804", "0.6838193", "0.6838193", "0.68134135", "0.67548865", "0.6725053", "0.67074996", "0.6691224", "0.6669001", "0.6643241", "0.66423565", "0.66280025", "0.6616834", "0.66109294", "0.65475696", "0.65382147", "0.65143675", "0.6498142", "0.6473094", "0.64729303", "0.6467471", "0.6464393", "0.64534277", "0.6449992", "0.64452475", "0.6419113", "0.64185387", "0.640677", "0.63916564", "0.63720816", "0.6371575", "0.6367544", "0.63640726", "0.6363498", "0.6362444", "0.6361664", "0.63372326", "0.6314338", "0.6311691", "0.63068676", "0.6304928", "0.62979597", "0.62931913", "0.6283701", "0.6283593", "0.6279029", "0.6267025", "0.6262689", "0.62482226", "0.6205947", "0.62011987", "0.62005544", "0.61892843", "0.618569", "0.61730164", "0.61705256", "0.61596465", "0.6151896", "0.61455446", "0.6144832", "0.6140677", "0.61296266", "0.61256945", "0.6125419", "0.61221", "0.6115291", "0.61132145", "0.6111817", "0.6083388", "0.6081281", "0.6073275", "0.60709155", "0.6054599", "0.6054299", "0.6052967", "0.6050991", "0.60377264", "0.6029217", "0.6027037" ]
0.7610241
11
Submit a metric as a rate, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)", "def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def rate(self, dataset, targets):\n raise NotImplementedError", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def rate(self, newrate):\n command = 'rate ' + str(newrate)\n self.run_command(command)", "def test_tag_rates_on_duplicate_metric_per_cost_type(self):\n tag_values_kwargs = [{\"value\": 0.2}]\n cost_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n ],\n \"currency\": \"USD\",\n }\n cost_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_key=\"k1\", tag_values=tag_values_kwargs)\n cost_model[\"rates\"][1][\"tag_rates\"] = format_tag_rate(tag_key=\"k2\", tag_values=tag_values_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=cost_model, context=self.request_context)\n self.assertTrue(serializer.is_valid(raise_exception=True))\n serializer.save()\n serializer.data", "def update_metrics(self, metrics, predictions, labels):\n return", "def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def post(self):\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = 
self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def incr(\n self,\n stat: str,\n count: int = 1,\n rate: float = 1,\n tags: Attributes = None,\n ):\n if _skip_due_to_rate(rate):\n return\n if count < 0:\n raise ValueError(\"count must be a positive value.\")\n\n if self.metrics_validator.test(stat) and name_is_otel_safe(self.prefix, stat):\n counter = self.metrics_map.get_counter(full_name(prefix=self.prefix, name=stat), attributes=tags)\n counter.add(count, attributes=tags)\n return counter", "def metrics(self, metrics):\n\n self._metrics = metrics", "def submit_metric():\n\n gson = json.loads(request.get_json())\n\n new_point = DataPoint(\n computer_name=gson[\"computer_name\"],\n cpu_percentage=gson[\"cpu_percentage\"],\n memory_percentage=gson[\"memory_percentage\"],\n timestamp=gson[\"timestamp\"]\n )\n\n with lock:\n if not instances.get(new_point.computer_name):\n instances[new_point.computer_name] = Timeline(\n maxsize=int(os.environ.get(\"COLLECTOR_BUFFER_SIZE\"))\n )\n instances[new_point.computer_name].append(new_point)\n\n return Response(status=200)", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def gauge(self, gauge, value):\n try:\n self._thread_pool_executor.submit(self._delegate.gauge, gauge, value)\n except:\n self._logger.exception('Exception caught submitting gauge metric')", "def test_add_tag_to_derived_metric(self):\n pass", "def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) -> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric", "def _process_rating(self, metadata: MetadataTransformModel | None):\n self.add_rating(self._transform_value(metadata))", "def rate(self, rate):\n\n self._rate = rate", "def rate(self, rate):\n\n self._rate = rate", "def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating, counts)*1.1 if is_gs else self.calculate_k(rating, counts)\n 
rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating", "def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating,counts)*1.1 if is_gs else self.calculate_k(rating,counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def endpoint_metrics_set(self, endpoint_name=None, metrics=None):\n if metrics is None:\n raise Exception(\"Metrics required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metrics', 'POST', body=metrics)\n else:\n self.request('/v1.1/endpoints/%s/metrics' % endpoint_name, 'POST', body=metrics)", "def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def dispatch_value(metric, value, type):\n log_verbose('Sending metric: %s=%s as type %s' % (metric, value,type))\n\n val = collectd.Values(plugin='redis_metrics')\n val.type = type\n val.type_instance = metric\n val.values = [value]\n val.dispatch()", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def submit_invocations_metric(lambda_context):\n if not are_enhanced_metrics_enabled():\n return\n\n lambda_metric(\n \"{}.invocations\".format(ENHANCED_METRICS_NAMESPACE_PREFIX),\n 1,\n tags=get_enhanced_metrics_tags(lambda_context),\n )", "def sum(self, key, value):\n self._metrics[key] += value", "def set_metrics(self):", "def add_metric_cost(self, obj, val):\n if self.conn is None:\n return\n\n key = \"{}_metric\".format(obj)\n self.conn.incrby(key, int(val))", "def set_metrics(self, metrics: List[Callable]) -> None:\n self.metrics = metrics", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending 
{}\".format(series)\n dog_http_api.metrics(series)", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def _increment_token_weight(self, weights: {str: int}, token=None, tag=None, weight=1) -> None:\n if tag:\n for node in self.soup.find_all(tag):\n for token in re.findall(\"[a-zA-Z\\d]+\", node.get_text()):\n weights[token] += weight\n elif token:\n weights[token] += weight", "def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den", "def __call__(self, rate:'kW'):\n self.rate = rate\n self.cost = self.price * rate", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def test_tag_key_can_be_multiple_cost_types(self):\n value_kwargs = [{\"value\": 0.1, \"default\": True, \"usage_start\": 1, \"usage_end\": 10}]\n tag_rates_list = []\n cost_types = [\"Infrastructure\", \"Supplementary\"]\n for cost_type in cost_types:\n rate = {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}, \"cost_type\": cost_type}\n rate[\"tag_rates\"] = format_tag_rate(tag_values=value_kwargs)\n tag_rates_list.append(rate)\n self.basic_model[\"rates\"] = tag_rates_list\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n self.assertTrue(serializer.is_valid(raise_exception=True))\n serializer.save()\n data = serializer.data\n rates = data.get(\"rates\", [])\n self.assertEqual(len(rates), 2)\n for rate in rates:\n tag_rate = rate.get(\"tag_rates\")\n self.assertIsNotNone(tag_rate)\n # Check cost types\n result_cost_type = rate[\"cost_type\"]\n self.assertIn(result_cost_type, cost_types)\n cost_types.remove(result_cost_type)\n # Check that to_representation is working\n tag_value = tag_rate[\"tag_values\"][0]\n decimals = [tag_value[\"value\"], tag_value[\"usage\"][\"usage_start\"], tag_value[\"usage\"][\"usage_end\"]]\n for expected_decimal in decimals:\n self.assertIsInstance(expected_decimal, Decimal)", "def metric(self, slug, num=1, category=None, expire=None, date=None):\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n if category:\n self._categorize(slug, category)\n\n # Increment keys. 
NOTE: current redis-py (2.7.2) doesn't include an\n # incrby method; .incr accepts a second ``amount`` parameter.\n keys = self._build_keys(slug, date=date)\n\n # Use a pipeline to speed up incrementing multiple keys\n pipe = self.r.pipeline()\n for key in keys:\n pipe.incr(key, num)\n if expire:\n pipe.expire(key, expire)\n pipe.execute()", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def gauge(self, gauge, value):\n if self.ignore_metrics:\n return\n\n with self._gauge_rlock:\n self._gauge_metrics[gauge] = value\n self._gauge_call_count += 1\n\n old_call_time = self._gauge_last_call_time\n self._gauge_last_call_time = arrow.utcnow().timestamp\n if (self._gauge_call_count == self._max_call_count > 0) or \\\n self._gauge_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._gauge_call_count = 0\n self.update_gauge()", "def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def set_metric(self, slug, value, category=None, expire=None, date=None):\n keys = self._build_keys(slug, date=date)\n\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n # Construct a dictionary of key/values for use with mset\n data = {}\n for k in keys:\n data[k] = value\n self.r.mset(data)\n\n # Add the category if applicable.\n if category:\n self._categorize(slug, category)\n\n # Expire the Metric in ``expire`` seconds if applicable.\n if expire:\n for k in keys:\n self.r.expire(k, expire)", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def gauge(\n self,\n stat: str,\n value: int | float,\n rate: float = 1,\n delta: bool = False,\n *,\n tags: Attributes = None,\n back_compat_name: str = \"\",\n ) -> None:\n if _skip_due_to_rate(rate):\n return\n\n if back_compat_name and self.metrics_validator.test(back_compat_name):\n self.metrics_map.set_gauge_value(\n full_name(prefix=self.prefix, name=back_compat_name), value, delta, tags\n )\n\n if self.metrics_validator.test(stat):\n self.metrics_map.set_gauge_value(full_name(prefix=self.prefix, name=stat), value, delta, tags)", "def inc(self, labels: dict[str, str]):\n\n val = self.get(labels)\n\n if val is None:\n val = 0\n\n val += 1\n\n self.set(labels, val)", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def log_metrics(engine: Engine, tag: str) -> None:\n metrics_format = \"{0} [{1}/{2}]: {3}\".format(\n tag, engine.state.epoch, engine.state.iteration, engine.state.metrics\n )\n engine.logger.info(metrics_format)", "def do_submit(self, price_float, volume_float):\r\n raise NotImplementedError()", "def lambda_metric(metric_name, value, timestamp=None, 
tags=None):\n tags = _tag_dd_lambda_layer(tags)\n if os.environ.get(\"DD_FLUSH_TO_LOG\", \"\").lower() == \"true\":\n logger.debug(\"Sending metric %s to Datadog via log forwarder\", metric_name)\n print(\n json.dumps(\n {\n \"m\": metric_name,\n \"v\": value,\n \"e\": timestamp or int(time.time()),\n \"t\": tags,\n }\n )\n )\n else:\n logger.debug(\"Sending metric %s to Datadog via lambda layer\", metric_name)\n lambda_stats.distribution(metric_name, value, timestamp=timestamp, tags=tags)", "def rate_per_unit(self, rate_per_unit):\n\n self._rate_per_unit = rate_per_unit", "def count(self, counter, delta):\n try:\n self._thread_pool_executor.submit(self._delegate.count, counter, delta)\n except:\n self._logger.exception('Exception caught submitting count metric')", "def rate_up(self,req):\n try:\n self.rating=self.upratings[self.rating+4]\n self.flush()\n self.log_track(self.uid,'rated',self.rating)\n except:\n pass\n return self.rating_return(req)", "def test_multiple_tag_values(self):\n value_kwargs = [\n {\"tag_value\": \"value_one\", \"value\": 0.1, \"default\": True},\n {\"tag_value\": \"value_two\", \"value\": 0.2},\n ]\n self.basic_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_values=value_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n self.assertTrue(serializer.is_valid(raise_exception=True))\n serializer.save()\n data = serializer.data\n rates = data[\"rates\"]\n self.assertEqual(len(rates), 1)\n for rate in rates:\n tag_rate = rate.get(\"tag_rates\")\n self.assertIsNotNone(tag_rate)\n tag_values = tag_rate[\"tag_values\"]\n self.assertEqual(len(tag_values), 2)", "def add_gauge(self, data, metric_id=None):\n self._post_data(prefix_id='gauges', data=data, metric_id=metric_id)", "def add_metric(self, *, id: str, prefix: typing.Optional[str]=None, tag_filters: typing.Optional[typing.Mapping[str,typing.Any]]=None) -> None:\n metric = BucketMetrics(id=id, prefix=prefix, tag_filters=tag_filters)\n\n return jsii.invoke(self, \"addMetric\", [metric])", "def set(self, labels: dict[str, str], value: float | None):\n\n labelstr = _get_label_string(labels)\n\n # If we do not know this instance yet\n if labelstr not in self._data:\n # we do not add new metrics without assigned value\n if value is None:\n return\n\n # we don't know this instance yet, so we create a new one\n self._data[labelstr] = MetricInstance(\n metric=self, labels=labels, value=value\n )\n\n # we already know this instance\n else:\n # if the value is None, we remove it\n if value is None:\n del self._data[labelstr]\n else:\n # we know this instance, so we update its value\n instance = self._data[labelstr]\n instance.value = value", "def optimize_metrics(self,\n metrics: list = None,\n verbose: bool = True):\n\n if metrics is None:\n metrics = self._supported_metrics\n else:\n metrics = [metric.lower() for metric in metrics]\n assert all(metric in self._supported_metrics for metric in metrics)\n for i in metrics:\n super(ThresholdOptimizer, self).__getattribute__(f'get_best_{i}_metrics')(verbose=verbose)", "def rating(self, value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n request_data = {'rating': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )", "def post_rating():\n\n id = request.args.get('id')\n\n rating = request.args.get('rating')\n\n record = mod.provide_rating(id, int(rating))\n\n return jsonify(record)", "def 
metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value", "def PostModelVersionMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def rate_video(self, params):\n video_id = params.get('video_id', [''])[0]\n rating = params.get('rating', [''])[0]\n rate = self.netflix_session.rate_video(\n video_id=video_id,\n rating=rating)\n return rate", "def submit_generic(host,\n plugin,\n typename,\n values,\n type_instance=None,\n plugin_instance=None):\n metric = collectd.Values()\n metric.host = host\n metric.plugin = plugin\n metric.type = typename\n if plugin_instance:\n metric.plugin_instance = plugin_instance\n if type_instance:\n metric.type_instance = type_instance\n if type(values) == list:\n metric.values = values\n elif type(values) in [int, float]:\n metric.values = [values]\n else:\n collectd.error('Unsupported values type. %s' % type(values))\n return\n metric.dispatch()", "def QueueMetricsView(request): # pylint: disable=invalid-name\n return ExportToDjangoView(request, view=\"rq-metrics\")", "def add_metric(self, metric_name, aggregate=None):\n\n clean_metric = metric_name.lower().strip()\n\n if clean_metric.lower() not in METRICS:\n raise Exception(\"Metric named: \" + metric_name + \" is not a valid benchmark metric.\")\n self.metrics.add(clean_metric)\n\n if not aggregate:\n self.raw_metrics.add(clean_metric)\n elif aggregate.lower().strip() in AGGREGATES:\n # Add aggregate to this metric\n clean_aggregate = aggregate.lower().strip()\n current_aggregates = self.aggregated_metrics.get(clean_metric, list())\n current_aggregates.append(clean_aggregate)\n self.aggregated_metrics[clean_metric] = current_aggregates\n else:\n raise Exception(\"Aggregate function \" + aggregate + \" is not a legal aggregate function name\");\n\n return self;", "def put_gauge(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def gauge(self, slug, current_value):\n k = self._gauge_key(slug)\n self.r.sadd(self._gauge_slugs_key, slug) # keep track of all Gauges\n self.r.set(k, current_value)", "def create_metric(self) -> EvalMetric:\n pass", "def metric(env, metric):\n envs = environments()\n check_env(env, envs)\n\n name = unquote(metric)\n metric = get_or_abort(puppetdb.metric, metric)\n return render_template(\n 'metric.html',\n name=name,\n metric=sorted(metric.items()),\n envs=envs,\n current_env=env)", "def _add_to_queue(key, value, step, time, run_id):\n met = Metric(key=key, value=value, timestamp=time, step=step)\n _metric_queue.append((run_id, met))\n if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:\n _thread_pool.submit(_flush_queue)", "def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None):\n\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def gauge(self, gauge, value):\n pass", "def gauge_v5(self, metric, value, tags=None, hostname=None, device_name=None, timestamp=None):\n # Make sure we 
get the original arguments back\n assert metric == METRIC_NAME\n assert value == METRIC_VALUE\n assert tags == METRIC_TAGS\n assert hostname is None\n assert device_name is None\n assert timestamp == METRIC_TIMESTAMP", "def add(self, url, **params):\n\n if 'tags' in params and isinstance(params['tags'], basestring):\n params['tags'] = params['tags'].split(',')\n\n self.queue('add', url=url, **params)", "def create(self, label_id):\n data = {\n 'type': 'tagit',\n 'rate_count': 0,\n 'rate_range': 'day',\n 'limit_count': 0,\n 'limit_range': 'day',\n 'schedule': [],\n 'enabled': True,\n 'args': {\n 'sn': label_id,\n 'tag_sn': label_id\n }\n }\n # Yes, it's confusing. the `/actions/` endpoint is used for tags, while\n # the /tags/ endpoint is used for labels.\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.ACTIONS.value,\n params=data\n )", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def upload_file(metrics_file, project, dataset, table, model_name=None):\n with open(metrics_file) as fp:\n metrics = json.load(fp)\n\n metrics = convert_format_in_metrics_list(metrics)\n\n benchmark_run = [{\n 'metrics': metrics,\n 'upload_ts': _current_epoch_secs(),\n 'model_name': model_name,\n }]\n\n return upload_metrics(benchmark_run, project, dataset, table)", "def rating(self, **kwargs):\n\n data = dict()\n data['value'] = kwargs.get('value') or None\n\n path = self._get_movie_id_path('rating')\n resp = self._post_method(path, kwargs, data)\n\n return resp", "def update_recall_rate(params, name, recall, k):\n if name in params:\n for key in params[name]:\n params[name][key] += recall[key] / k\n else:\n params[name] = {}\n for key in recall:\n params[name][key] = recall[key] / k", "def do_user_rating(parser, token):\n argv = token.contents.split()\n argc = len(argv)\n\n if argc != 4:\n raise TemplateSyntaxError('Tag %s takes three arguments.' 
% argv[0])\n if argv[2] != \"as\":\n raise TemplateSyntaxError('Second argument must be \"as\" for tag %s' %\n argv[0])\n\n return GetUserRating(argv[1], argv[3])", "def with_meter(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n name = '.'.join((get_function_name(fn), 'rate'))\n return metrics.with_meter(name)(fn)(*args, **kwargs)\n return wrapper", "def create_metering_label_rule(self, body=None):\r\n return self.post(self.metering_label_rules_path, body=body)", "async def send_add_metric(self, title: str, metric_type: str) -> None:\n msg, sending_dialogue = self.dialogues.create(\n counterparty=self.prometheus_address,\n performative=PrometheusMessage.Performative.ADD_METRIC,\n title=title,\n type=metric_type,\n description=\"a gauge\",\n labels={},\n )\n assert sending_dialogue is not None\n\n envelope = Envelope(\n to=msg.to,\n sender=msg.sender,\n message=msg,\n )\n await self.prometheus_con.send(envelope)", "def evaluate_with_metrics(self, dataset, metrics, *args, **kwargs):\n\n utils.assert_raise(isinstance(metrics, dict), ValueError,\n '\"metrics\" must be a dict with metric_name -> metric_function')\n result = dict()\n\n for sample in dataset:\n output = self.predict(sample)\n\n for key, call in metrics.items():\n holder = result.get(key, list())\n holder.append(call(output, sample))\n\n result[key] = holder\n\n return result", "def set_current_rate(self, rate_to_set):\n pass", "def register(self, gauge):\r\n raise NotImplementedError", "def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def inc(self, value: Union[int, float] = 1.0, tags: Dict[str, str] = None):\n _add_serve_context_tag_values(self._tag_keys, tags)\n super().inc(value, tags)" ]
[ "0.65285105", "0.58314323", "0.57720864", "0.55562365", "0.5346391", "0.5288677", "0.5270663", "0.5265564", "0.51896477", "0.51511014", "0.5126361", "0.51259017", "0.51075906", "0.5091107", "0.5091107", "0.5021949", "0.5016198", "0.4961651", "0.49544987", "0.49520537", "0.49461487", "0.49023053", "0.48914933", "0.4881529", "0.4881529", "0.4844227", "0.48373455", "0.48332232", "0.48079944", "0.48069996", "0.47949073", "0.47467178", "0.47329244", "0.47011542", "0.470098", "0.46361482", "0.46171454", "0.46167243", "0.46147197", "0.46114862", "0.46085897", "0.46077904", "0.46000686", "0.45925206", "0.45918724", "0.45900038", "0.45888168", "0.45882505", "0.4586275", "0.456109", "0.4550705", "0.45397347", "0.45389047", "0.45386738", "0.45203254", "0.45126128", "0.45041487", "0.4493576", "0.44721904", "0.44712025", "0.44700593", "0.4465774", "0.4462305", "0.44368282", "0.44324076", "0.44322622", "0.44308284", "0.44305938", "0.4424792", "0.44220856", "0.44213563", "0.44181073", "0.44174543", "0.44133544", "0.4407755", "0.44028425", "0.44025367", "0.4399937", "0.43904293", "0.43888324", "0.43870637", "0.43810502", "0.43764415", "0.43744224", "0.43717387", "0.43713567", "0.4359478", "0.4355777", "0.43556362", "0.43475458", "0.4341086", "0.43322766", "0.43298507", "0.43283457", "0.43221822", "0.43166226", "0.4310666", "0.431021", "0.43091947", "0.4308734" ]
0.7829274
0
Submit a metric as a gauge, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gauge(self, gauge, value):\n try:\n self._thread_pool_executor.submit(self._delegate.gauge, gauge, value)\n except:\n self._logger.exception('Exception caught submitting gauge metric')", "def add_gauge(self, data, metric_id=None):\n self._post_data(prefix_id='gauges', data=data, metric_id=metric_id)", "def gauge(self, gauge, value):\n pass", "def gauge(self, gauge, value):\n if self.ignore_metrics:\n return\n\n with self._gauge_rlock:\n self._gauge_metrics[gauge] = value\n self._gauge_call_count += 1\n\n old_call_time = self._gauge_last_call_time\n self._gauge_last_call_time = arrow.utcnow().timestamp\n if (self._gauge_call_count == self._max_call_count > 0) or \\\n self._gauge_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._gauge_call_count = 0\n self.update_gauge()", "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)", "def put_gauge(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def gauge_v5(self, metric, value, tags=None, hostname=None, device_name=None, timestamp=None):\n # Make sure we get the original arguments back\n assert metric == METRIC_NAME\n assert value == METRIC_VALUE\n assert tags == METRIC_TAGS\n assert hostname is None\n assert device_name is None\n assert timestamp == METRIC_TIMESTAMP", "def metrics_gauge(self, gauge_data):\n url = _METRICS_URL_TEMPLATE.format(base_url=self._events_api_url_base, endpoint='gauge')\n return self._post(url, gauge_data)", "def register(self, gauge):\r\n raise NotImplementedError", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def gauge(name, value):\n metric = _get_metric(name) or metrics.new_gauge(name)\n metric.notify(value)", "def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)", "def update_gauge(self):\n gauge_metrics = self._fetch_gauge_metrics_and_clear()\n self._logger.info('update_gauge. 
gauge_metrics = %s',\n build_metrics_gauge_data(gauge_metrics))", "async def send_add_metric(self, title: str, metric_type: str) -> None:\n msg, sending_dialogue = self.dialogues.create(\n counterparty=self.prometheus_address,\n performative=PrometheusMessage.Performative.ADD_METRIC,\n title=title,\n type=metric_type,\n description=\"a gauge\",\n labels={},\n )\n assert sending_dialogue is not None\n\n envelope = Envelope(\n to=msg.to,\n sender=msg.sender,\n message=msg,\n )\n await self.prometheus_con.send(envelope)", "def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def gauge(self, slug, current_value):\n k = self._gauge_key(slug)\n self.r.sadd(self._gauge_slugs_key, slug) # keep track of all Gauges\n self.r.set(k, current_value)", "def batch_gauge(self, metric_dict, prefix='stalker.'):\n if not self.enabled:\n return\n payload = []\n for k in metric_dict:\n payload.append('%s%s:%d|g' % (prefix, k, metric_dict[k]))\n self._send_events(payload)", "def update_gauge(self):\n try:\n self._thread_pool_executor.submit(self._update_gauge_fn)\n except:\n self._logger.exception('Exception caught submitting gauge metrics update task.')", "def gauge_v6(self, name, value, tags=None, hostname=None, device_name=None):\n # Make sure we get the original arguments back and timestamp is not being received\n assert name == METRIC_NAME\n assert value == METRIC_VALUE\n assert tags == METRIC_TAGS\n assert hostname is None\n assert device_name is None", "def update_gauge(self):\n pass # Do nothing", "def add_favorite_gauge(params, match):\n gauge_no = match.group(1)\n gauge_name = match.group(2)\n table = get_gauges_table()\n table.put_item(Item={\n 'USGSSiteNumber': gauge_no,\n 'GuageName': gauge_name\n })\n return lambda_response(None, \"added gauge %s %s\" % (gauge_no, gauge_name))", "def _create_gauge(self, name: str, attributes: Attributes = None):\n otel_safe_name = _get_otel_safe_name(name)\n key = _generate_key_name(name, attributes)\n\n gauge = self.meter.create_observable_gauge(\n name=otel_safe_name,\n callbacks=[partial(self.read_gauge, _generate_key_name(name, attributes))],\n )\n self.map[key] = Observation(DEFAULT_GAUGE_VALUE, attributes)\n\n return gauge", "def gauge(\n self,\n stat: str,\n value: int | float,\n rate: float = 1,\n delta: bool = False,\n *,\n tags: Attributes = None,\n back_compat_name: str = \"\",\n ) -> None:\n if _skip_due_to_rate(rate):\n return\n\n if back_compat_name and self.metrics_validator.test(back_compat_name):\n self.metrics_map.set_gauge_value(\n full_name(prefix=self.prefix, name=back_compat_name), value, delta, tags\n )\n\n if self.metrics_validator.test(stat):\n self.metrics_map.set_gauge_value(full_name(prefix=self.prefix, name=stat), value, delta, tags)", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def set_gauge_value(self, name: str, value: float | None, delta: bool, tags: Attributes):\n key: str = _generate_key_name(name, tags)\n new_value = value or DEFAULT_GAUGE_VALUE\n old_value = self.poke_gauge(name, tags)\n if delta:\n new_value += old_value\n # If delta is true, add the new value to the last reading otherwise overwrite it.\n self.map[key] = Observation(new_value, tags)", "def add(self, value, source=None, **params):\n\t\treturn 
self.connection.send_gauge_value(self.name, value, source, **params)", "def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) -> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def _register_if_needed(self, metric_point: MetricPoint):\n metric_name = metric_point.metric_name\n metric_description = metric_point.description\n metric_units = metric_point.units\n if self._registry[metric_name] is None:\n tags = metric_point.tags\n metric_tags = []\n for tag_key in tags:\n metric_tags.append(tag_key_module.TagKey(tag_key))\n\n metric = Gauge(metric_name, metric_description, metric_units,\n metric_tags)\n self._registry[metric_name] = metric\n self.view_manager.register_view(metric.view)\n\n # If there are missing description & unit information,\n # we should notify cpp processes that we need them.\n if not metric_description or not metric_units:\n self._missing_information = True\n\n if metric_description and metric_units:\n self._registry[metric_name].view._description = metric_description\n self._registry[\n metric_name].view.measure._description = metric_description\n self._registry[metric_name].view.measure._unit = metric_units\n self._missing_information = False", "def dispatch(self, host, obj_type, obj_instance, value):\n\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def gauge(self, stat, value, sample_rate=1):\n stats = {stat: \"%f|g\" % value}\n self.send(stats, sample_rate)", "def test_gauge(self):\n # Create a metrics with no metric instances\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name, self.gauge_metric_help, self.gauge_metric_type, []\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(len(mf.metric), 0)\n\n # Create it with metrics\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(mf.name, self.gauge_metric_name)\n self.assertEqual(mf.help, self.gauge_metric_help)\n self.assertEqual(mf.type, self.gauge_metric_type)\n\n # Create another and check equal\n mf_ = 
pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n self.assertEqual(mf, mf_)\n\n for m in mf_.metric:\n self.assertEqual(m.timestamp_ms, 0)\n\n # Create another with timestamp\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n timestamp=True,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n for m in mf_.metric:\n self.assertNotEqual(m.timestamp_ms, 0)\n\n self.assertNotEqual(mf, mf_)\n\n # Create Gauge with const_labels\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n const_labels=self.const_labels,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n # Check that const_label is present in the LabelPair associated\n # with each metric instance.\n for m in mf_.metric:\n labels = [lp.name for lp in m.label]\n self.assertIn(\"app\", labels)\n\n self.assertNotEqual(mf, mf_)\n\n # Check Gauge can be round-tripped through encode and decode\n payload = pmp.encode(mf)\n self.assertIsInstance(payload, bytes)\n _mf = pmp.decode(payload)[0]\n self.assertEqual(mf, _mf)", "def test_gauge(self):\n with patch(\"redis_metrics.templatetags.redis_metric_tags.get_r\") as mock_r:\n inst = mock_r.return_value\n inst.get_gauge.return_value = 100\n\n size = 50\n maximum = 200\n result = taglib.gauge(\"test-slug\", maximum, size)\n expected_result = {\n 'slug': \"test-slug\",\n 'current_value': 100,\n 'max_value': maximum,\n 'size': size,\n 'diff': maximum - 100\n }\n self.assertEqual(result, expected_result)\n mock_r.assert_called_once_with()\n inst.get_gauge.assert_called_once_with(\"test-slug\")", "def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def gauge_int_timeseries(resource_type, resource_labels, metric_type,\n metric_labels, value):\n series = monitoring_v3.TimeSeries()\n series.metric.type = metric_type\n series.metric.labels.update(metric_labels)\n series.resource.type = resource_type\n series.resource.labels.update(resource_labels)\n series.metric_kind = 'GAUGE'\n now = time.time()\n seconds = int(now)\n nanos = int((now - seconds) * 10**9)\n interval = monitoring_v3.TimeInterval(\n {'end_time': {\n 'seconds': seconds,\n 'nanos': nanos\n }})\n point = monitoring_v3.Point({\n 'interval':\n interval,\n 'value':\n monitoring_v3.TypedValue(int64_value=value)\n })\n series.points = [point]\n return series", "def test_add_tag_to_derived_metric(self):\n pass", "def list_gauge(self, metric_id, **kwargs):\n prefix_id = \"gauges/{}\".format(urlquote(metric_id, safe=''))\n return self._list_data(prefix_id=prefix_id, **kwargs)", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def metric(env, metric):\n envs = environments()\n check_env(env, envs)\n\n name = unquote(metric)\n metric = get_or_abort(puppetdb.metric, metric)\n return render_template(\n 'metric.html',\n name=name,\n metric=sorted(metric.items()),\n envs=envs,\n current_env=env)", "def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)", "def untag_metric(request, tag_id, metric_id, 
error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag not in metric.tags.all():\n error += 'This metric isn\\'t tagged with this tag.'\n\n if not error:\n try:\n metric.tags.remove(tag)\n except:\n error += 'Couldn\\'t remove tag from metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def visdom_push_metrics(vis, metrics):\n visdom_send_metrics(vis, metrics, 'replace')", "def submit_generic(host,\n plugin,\n typename,\n values,\n type_instance=None,\n plugin_instance=None):\n metric = collectd.Values()\n metric.host = host\n metric.plugin = plugin\n metric.type = typename\n if plugin_instance:\n metric.plugin_instance = plugin_instance\n if type_instance:\n metric.type_instance = type_instance\n if type(values) == list:\n metric.values = values\n elif type(values) in [int, float]:\n metric.values = [values]\n else:\n collectd.error('Unsupported values type. %s' % type(values))\n return\n metric.dispatch()", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def get_gauge_data(self, gauge_name, min_date_key=None, max_date_key=None):\n pass", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def post(self):\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. 
Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)", "def dispatch_value(metric, value, type):\n log_verbose('Sending metric: %s=%s as type %s' % (metric, value,type))\n\n val = collectd.Values(plugin='redis_metrics')\n val.type = type\n val.type_instance = metric\n val.values = [value]\n val.dispatch()", "def metrics(self, metrics):\n\n self._metrics = metrics", "def save_data(self, gauge_name, date_key, data):\n pass", "def submit_metric():\n\n gson = json.loads(request.get_json())\n\n new_point = DataPoint(\n computer_name=gson[\"computer_name\"],\n cpu_percentage=gson[\"cpu_percentage\"],\n memory_percentage=gson[\"memory_percentage\"],\n timestamp=gson[\"timestamp\"]\n )\n\n with lock:\n if not instances.get(new_point.computer_name):\n instances[new_point.computer_name] = Timeline(\n maxsize=int(os.environ.get(\"COLLECTOR_BUFFER_SIZE\"))\n )\n instances[new_point.computer_name].append(new_point)\n\n return Response(status=200)", "def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None):\n\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def create_metering_label(self, body=None):\r\n return self.post(self.metering_labels_path, body=body)", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def check_gauge(params, match):\n gauge_no = match.group(1)\n stats_url = USGS_STATS_URL_TEMPLATE % gauge_no\n graph_url = USGS_GRAPH_URL_TEMPLATE % gauge_no\n\n response = requests.get(stats_url)\n last_measurement = response.text.strip().split(\"\\n\")[-1]\n _, _, _, mtime, tz, cfs, _ = re.split('\\s+', last_measurement)\n\n return lambda_response(None, {\n \"text\": \"Last measurement: %s cfs @ %s %s\" % (cfs, mtime, tz),\n \"attachments\": [{ \"image_url\": graph_url }]\n })", "def add_metric(self, metric_name, metric_value, login=False):\n if login:\n self._gc.login()\n\n try: \n if metric_name not in self._metric_dict:\n metric_index = len(self._metric_dict) + 2\n self._wks.update_cell(1, metric_index, metric_name)\n self._metric_dict[metric_name] = metric_index\n self.save_config()\n\n self._wks.update_cell(self.row_index, self._metric_dict[metric_name], metric_value)\n except Exception as ins:\n if not login:\n self.add_metric(metric_name, metric_value, login=True)\n else:\n return '\\n'.join([str(type(ins)), str(ins.args), str(ins)])\n return None", "def poke_gauge(self, name: str, attributes: 
Attributes = None) -> GaugeValues:\n key = _generate_key_name(name, attributes)\n if key not in self.map:\n self._create_gauge(name, attributes)\n\n return self.map[key].value", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def endpoint_metrics_set(self, endpoint_name=None, metrics=None):\n if metrics is None:\n raise Exception(\"Metrics required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metrics', 'POST', body=metrics)\n else:\n self.request('/v1.1/endpoints/%s/metrics' % endpoint_name, 'POST', body=metrics)", "def register(metric, name=None):\n return set_namespace(registered, metric, name=name, set_global=True)", "def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def gauge(x):\n fig = go.Figure(go.Indicator(\n mode=\"gauge+number\",\n value=x,\n domain={'x': [0, 1], 'y': [0, 1]},\n title={'text': \"Streaks\", 'font': {'size': 24}},\n gauge={\n 'axis': {'range': [None, 7], 'tickwidth': 1,\n 'tickcolor': \"darkblue\"},\n 'bar': {'color': \"#394387\"},\n 'bgcolor': \"white\",\n 'borderwidth': 2,\n 'bordercolor': \"gray\",\n 'steps': [\n {'range': [0, 2], 'color': 'rgba(255, 99, 132, 0.6)'},\n {'range': [2, 5], 'color': 'rgba(255, 206, 86, 0.6)'},\n {'range': [5, 7], 'color': 'rgba(75, 192, 192, 0.6)'}],\n 'threshold': {\n 'line': {'color': '#282f5f', 'width': 4},\n 'thickness': 1,\n 'value': x}}))\n fig.update_layout(\n paper_bgcolor=\"rgba(0,0,0,0)\",\n plot_bgcolor='rgba(0,0,0,0)',\n font={\n 'color': \"#222851\",\n 'family': \"Arial\"})\n # return fig.write_image(\"templates/gauge.png\")\n plot = fig.write_image(\"gauge.png\")\n with open(\"gauge.png\", \"rb\") as imageFile:\n str = base64.b64encode(imageFile.read())\n return str", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? 
we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def gcp_create_metric_descriptor(project_id: str):\n client = monitoring_v3.MetricServiceClient()\n project_name = client.project_path(project_id)\n\n for desc_type, desc_desc in [\n [\"buildbots_percent_failed\", \"Percentage of failed builds\"],\n [\"buildbots_builds_successful\", \"Number of successful builds in the last 24h.\"],\n [\"buildbots_builds_failed\", \"Number of failed builds in the last 24h.\"],\n [\"buildbots_builds_total\", \"Total number of builds in the last 24h.\"],\n ]:\n\n descriptor = monitoring_v3.types.MetricDescriptor()\n descriptor.type = 'custom.googleapis.com/buildbots_{}'.format(desc_type)\n descriptor.metric_kind = (\n monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)\n descriptor.value_type = (\n monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)\n descriptor.description = desc_desc\n descriptor = client.create_metric_descriptor(project_name, descriptor)\n print('Created {}.'.format(descriptor.name))", "def update(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.update, params)", "def add_gauge_server(self, data, feed_id, server_id, metric_enum):\n metric_id = self._metric_id_gauge_server(feed_id=feed_id, server_id=server_id,\n metric_enum=metric_enum)\n self.add_gauge(data=data, metric_id=metric_id)", "def test_gauge_when_overloaded(self):\n with patch(\"redis_metrics.templatetags.redis_metric_tags.get_r\") as mock_r:\n inst = mock_r.return_value\n inst.get_gauge.return_value = 500\n\n size = 50\n maximum = 200\n result = taglib.gauge(\"test-slug\", maximum, size)\n expected_result = {\n 'slug': \"test-slug\",\n 'current_value': 500,\n 'max_value': maximum,\n 'size': size,\n 'diff': 0, # deff should default to 0 when overloaded.\n }\n self.assertEqual(result, expected_result)\n mock_r.assert_called_once_with()\n inst.get_gauge.assert_called_once_with(\"test-slug\")", "def set_metric(self, slug, value, category=None, expire=None, date=None):\n keys = self._build_keys(slug, date=date)\n\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n # Construct a dictionary of key/values for use with mset\n data = {}\n for k in keys:\n data[k] = value\n self.r.mset(data)\n\n # Add the category if applicable.\n if category:\n self._categorize(slug, category)\n\n # Expire the Metric in ``expire`` seconds if applicable.\n if expire:\n for k in keys:\n self.r.expire(k, expire)", "def add_gauge_datasource(self, data, feed_id, server_id, resource_id, metric_enum):\n metric_id = self._metric_id_guage_datasource(feed_id=feed_id, server_id=server_id,\n resource_id=resource_id,\n metric_enum=metric_enum)\n self.add_gauge(data=data, metric_id=metric_id)", "def add_metric(self, *, id: str, prefix: typing.Optional[str]=None, tag_filters: typing.Optional[typing.Mapping[str,typing.Any]]=None) -> None:\n metric = 
BucketMetrics(id=id, prefix=prefix, tag_filters=tag_filters)\n\n return jsii.invoke(self, \"addMetric\", [metric])", "def _send_picklemetrics(metrics):\n\n metrics = [\n (metric_name, (timestamp, value)) for (metric_name, value, timestamp) in metrics\n ]\n\n data = pickle.dumps(metrics, -1)\n payload = struct.pack(b\"!L\", len(data)) + data\n\n return payload", "def create(self,\n metric_type,\n metric_kind='GAUGE',\n value_type='DOUBLE',\n description='N/A'):\n descriptor = ga_metric.MetricDescriptor()\n if metric_type.startswith('custom.googleapis.com/'):\n descriptor.type = metric_type\n else:\n descriptor.type = 'custom.googleapis.com/%s' % metric_type\n descriptor.metric_kind = (getattr(ga_metric.MetricDescriptor.MetricKind,\n metric_kind))\n descriptor.value_type = (getattr(ga_metric.MetricDescriptor.ValueType,\n value_type))\n descriptor.description = description\n LOGGER.info(f'Creating metric descriptor \"{descriptor.type}\" ...')\n return self.client.create_metric_descriptor(\n name=self.project, metric_descriptor=descriptor)", "def add(self, delta):\r\n if not isinstance(delta, Compatibility.integer):\r\n raise TypeError('AtomicGauge.add must be called with an integer.')\r\n with self.lock():\r\n self._value += delta\r\n return self._value", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def gauge(ax, row, col, params):\n if col == \"high\":\n if pd.notna(row[\"high_record\"]):\n params.maxval = row[\"high_record\"]\n if pd.notna(row[\"high_normal\"]):\n params.avgval = row[\"high_normal\"]\n else:\n if pd.notna(row[\"low_record\"]):\n params.minval = float(row[\"low_record\"])\n if pd.notna(row[\"low_normal\"]):\n params.avgval = row[\"low_normal\"]\n\n # Polar coordinates, so 0 is maxval and pi is minval\n colors = [\"#BE0000\", \"#E48900\", \"#B6EB7A\", \"#0F4CBB\", \"#1B262C\"]\n # Okay, the chart will go from maxval (rad=pi) to maxval (rad=0)\n bar_ends = [\n float(params.avgval + 2 * params.stddev),\n float(params.avgval + params.stddev),\n float(params.avgval - params.stddev),\n float(params.avgval - 2 * params.stddev),\n params.minval,\n ]\n labels = [r\"2$\\sigma$\", r\"$\\sigma$\", r\"-$\\sigma$\", r\"-2$\\sigma$\", \"\"]\n pos = 0\n positive_delta = float(params.maxval - params.avgval)\n negative_delta = float(params.avgval - params.minval)\n if positive_delta == 0:\n positive_delta = 0.01\n if negative_delta == 0:\n negative_delta = 0.01\n for val, color, label in zip(bar_ends, colors, labels):\n if val > params.avgval:\n ha = \"left\"\n if val > params.maxval:\n continue\n pos2 = (params.maxval - val) / positive_delta * pi / 2.0\n else:\n ha = \"right\"\n if val < params.minval:\n continue\n pos2 = pi / 2.0 + (\n (params.avgval - val) / negative_delta * pi / 2.0\n )\n ax.add_patch(Rectangle((pos, 1), pos2 - pos, 2, color=color))\n if abs(val - params.minval) > 1 and abs(val - params.maxval) > 1:\n ax.text(pos2, 3.1, f\"{val:.0f}\", ha=ha)\n ax.text(\n pos2,\n 0.8,\n label,\n va=\"center\",\n ha=\"left\" if ha == \"right\" else \"right\",\n )\n pos = pos2\n # manual placement of max/min\n ax.text(\n 0 if col == \"low\" else pi,\n 3.1,\n f\"{params.maxval:.0f}\" if col == \"low\" else f\"{params.minval:.0f}\",\n ha=\"left\" if col == \"low\" else \"right\",\n )\n\n # Add ticks for percentiles 10 through 90\n for val in params.ptiles:\n if val > params.avgval:\n pos = 
(params.maxval - val) / positive_delta * pi / 2.0\n else:\n pos = pi / 2.0 + (\n (params.avgval - val) / negative_delta * pi / 2.0\n )\n ax.add_patch(Rectangle((pos, 1), 0.001, 2, color=\"white\"))\n\n # Tick for params.avgval\n ax.add_patch(Rectangle((pi / 2.0, 1), 0.001, 2, color=\"k\"))\n # Median\n val = params.ptiles[4]\n if val > params.avgval:\n pos = (params.maxval - val) / positive_delta * pi / 2.0\n else:\n pos = pi / 2.0 + ((params.avgval - val) / negative_delta * pi / 2.0)\n ax.add_patch(Rectangle((pos, 1), 0.001, 2, color=\"r\"))\n\n ax.grid(False)\n ax.set_xlim(0, pi)\n ax.set_xticks([])\n if row[col] >= params.avgval:\n theta = (params.maxval - row[col]) / positive_delta * (pi / 2.0)\n theta = max([0, theta])\n else:\n theta = (pi / 2.0) + (params.avgval - row[col]) / negative_delta * (\n pi / 2.0\n )\n theta = min([pi, theta])\n ax.text(\n -0.05 if col == \"high\" else pi + 0.05,\n 2,\n f\"Record: \"\n rf\"{miss(row[col + '_record'])}$^\\circ$F\"\n f\"\\n{', '.join([str(s) for s in row[col + '_record_years']])}\",\n va=\"top\",\n ha=\"left\" if col == \"high\" else \"right\",\n )\n ax.text(\n pi / 2,\n 3.25,\n \"Avg:\\n\" + f\"{miss(row[f'{col}_normal'])}\" + r\"$^\\circ$F\",\n ha=\"center\",\n )\n ax.set_rorigin(-4.5)\n ax.set_yticks([])\n ax.arrow(\n theta,\n -4.5,\n 0,\n 5.5,\n width=0.1,\n head_width=0.2,\n head_length=1,\n fc=\"yellow\",\n ec=\"k\",\n clip_on=False,\n )\n ax.text(\n theta,\n -4.5,\n rf\"{row[col]}$^\\circ$F\" f\"\\n@{row[col + '_time']} LST\",\n ha=\"center\",\n va=\"top\",\n fontsize=14,\n )", "def inc(self, labels: dict[str, str]):\n\n val = self.get(labels)\n\n if val is None:\n val = 0\n\n val += 1\n\n self.set(labels, val)", "def set_progress(self, value):\n self.gauge.SetValue(value)", "def update_metrics(self, metrics, predictions, labels):\n return", "def log_metrics(engine: Engine, tag: str) -> None:\n metrics_format = \"{0} [{1}/{2}]: {3}\".format(\n tag, engine.state.epoch, engine.state.iteration, engine.state.metrics\n )\n engine.logger.info(metrics_format)", "def CreatePodMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_metric(self, metric, metric_name=None):\n metric_name = metric_name or metric.name\n with self._accessor_lock:\n self._accessor.create_metric(metric)\n self._cache_set(metric_name, metric)", "def upload_metrics(metrics_dict, project, dataset, table):\n # Credentials will be loaded from envvar $GOOGLE_APPLICATION_CREDENTIALS.\n bq_client = bigquery.Client(project=project)\n table_ref = bq_client.dataset(dataset).table(table)\n errors = bq_client.insert_rows_json(table_ref, metrics_dict)\n return errors", "def metric(self, slug, num=1, category=None, expire=None, date=None):\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n if category:\n self._categorize(slug, category)\n\n # Increment keys. 
NOTE: current redis-py (2.7.2) doesn't include an\n # incrby method; .incr accepts a second ``amount`` parameter.\n keys = self._build_keys(slug, date=date)\n\n # Use a pipeline to speed up incrementing multiple keys\n pipe = self.r.pipeline()\n for key in keys:\n pipe.incr(key, num)\n if expire:\n pipe.expire(key, expire)\n pipe.execute()", "def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)", "def add_metric(self, metric_name, aggregate=None):\n\n clean_metric = metric_name.lower().strip()\n\n if clean_metric.lower() not in METRICS:\n raise Exception(\"Metric named: \" + metric_name + \" is not a valid benchmark metric.\")\n self.metrics.add(clean_metric)\n\n if not aggregate:\n self.raw_metrics.add(clean_metric)\n elif aggregate.lower().strip() in AGGREGATES:\n # Add aggregate to this metric\n clean_aggregate = aggregate.lower().strip()\n current_aggregates = self.aggregated_metrics.get(clean_metric, list())\n current_aggregates.append(clean_aggregate)\n self.aggregated_metrics[clean_metric] = current_aggregates\n else:\n raise Exception(\"Aggregate function \" + aggregate + \" is not a legal aggregate function name\");\n\n return self;", "def add(self, key, label):\n self.labels[key] = label", "def send_metric(model_id, metric, value):\n host, port, namespace = get_metric_endpoint()\n\n metric_name = '%s.%s' % (namespace, get_metric_name(metric, model_id))\n message = \"%s %f %d\\n\" % (metric_name, float(value), int(time.time()))\n send_tcp(host, port, message)\n\n build_no = get_build_number()\n metric_name = '%s.%s' % (namespace, get_metric_name('build', model_id))\n message = \"%s %f %d\\n\" % (metric_name, build_no, int(time.time()))\n send_tcp(host, port, message)", "def test_metric_labels(self):\n team_id = 'Team_foo'\n m1 = Metric.create(name='Foo Condition', label='foo_condition')\n m2 = Metric.create(name='Bar Condition', label='bar_condition')\n Metric.put_multi([m1, m2])\n survey = Survey.create(team_id=team_id, metrics=[m1.uid, m2.uid])\n survey.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team_id])\n user.put()\n response = self.testapp.get(\n '/api/surveys/{}'.format(survey.uid),\n headers=self.login_headers(user),\n )\n\n logging.info(response.body)\n self.assertEqual(\n json.loads(response.body)['metric_labels'],\n {m1.uid: 'foo_condition', m2.uid: 'bar_condition'},\n )", "def process(self, metric):\n self.metrics.append(metric)\n if self.should_flush():\n self._send()", "def sum(self, key, value):\n self._metrics[key] += value", "def set(self, labels: dict[str, str], value: float | None):\n\n labelstr = _get_label_string(labels)\n\n # If we do not know this instance yet\n if labelstr not in self._data:\n # we do not add new metrics without assigned value\n if value is None:\n return\n\n # we don't know this instance yet, so we create a new one\n self._data[labelstr] = MetricInstance(\n metric=self, labels=labels, value=value\n )\n\n # we already know this instance\n else:\n # if the value is None, we remove it\n if value is None:\n del self._data[labelstr]\n else:\n # we know this instance, so we update its value\n instance = self._data[labelstr]\n instance.value = value" ]
[ "0.7285011", "0.6867734", "0.68111044", "0.6699969", "0.6642851", "0.6484831", "0.64751744", "0.6383549", "0.6362057", "0.6242089", "0.61857253", "0.60445803", "0.5981221", "0.5947248", "0.58967316", "0.5812371", "0.57682973", "0.5750602", "0.5690605", "0.5674989", "0.5456014", "0.5424948", "0.54027647", "0.5344845", "0.5336723", "0.53249854", "0.5322995", "0.53185004", "0.53185004", "0.52390313", "0.52367634", "0.5205778", "0.5205624", "0.5177443", "0.5176384", "0.51599973", "0.5150505", "0.51443577", "0.51435035", "0.51427567", "0.51278746", "0.51147664", "0.5049214", "0.50361866", "0.5017725", "0.49972337", "0.4994562", "0.48945582", "0.48925486", "0.48561192", "0.48494175", "0.48472556", "0.48271435", "0.48142698", "0.47986206", "0.4779052", "0.47271806", "0.47267136", "0.4707503", "0.4702111", "0.4690626", "0.468558", "0.46774536", "0.46730223", "0.46721432", "0.46653154", "0.46592823", "0.4659044", "0.46557134", "0.46290216", "0.4621907", "0.46130264", "0.46033773", "0.4596742", "0.45717052", "0.45554793", "0.45310575", "0.45296583", "0.45215324", "0.45186266", "0.45173395", "0.4491873", "0.44911638", "0.44874513", "0.44861066", "0.44656643", "0.44643232", "0.44572446", "0.4453336", "0.4443427", "0.44410115", "0.44339383", "0.44337016", "0.44318756", "0.44251972", "0.4424569", "0.44180617", "0.4417455", "0.44064683", "0.44054666" ]
0.7893012
0
Submit a metric as a monotonic count; additional tags provided will be added to the ones from the labels provided via the metrics object.
def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None):\n        _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n        self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
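For orientation, a minimal self-contained sketch of the pattern this helper follows — namespace the metric name, merge label-derived tags with any custom tags, then submit a monotonic count. The stub check, namespace, and metric names below are illustrative assumptions, not taken from the dataset row.

# Minimal sketch: merging label tags with custom tags before submitting a
# namespaced monotonic count. StubCheck stands in for a real check/aggregator
# and simply records what would be submitted.
class StubCheck:
    def __init__(self):
        self.submitted = []

    def monotonic_count(self, name, value, tags, hostname=None):
        # A real check would forward this to the aggregator; here we record it.
        self.submitted.append((name, value, sorted(tags), hostname))


class MetricSubmitter:
    NAMESPACE = 'myapp'  # assumed namespace prefix

    def __init__(self, check):
        self.check = check

    def _metric_tags(self, metric_name, val, labels, custom_tags=None):
        # Tags derived from the metric's labels, plus any caller-supplied tags.
        tags = ['{}:{}'.format(k, v) for k, v in labels.items()]
        return tags + list(custom_tags or [])

    def submit_monotonic_count(self, metric_name, val, labels, custom_tags=None, hostname=None):
        tags = self._metric_tags(metric_name, val, labels, custom_tags)
        self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, tags, hostname=hostname)


check = StubCheck()
MetricSubmitter(check).submit_monotonic_count('requests.count', 3, {'pod': 'web-1'}, custom_tags=['env:dev'])
print(check.submitted)  # [('myapp.requests.count', 3, ['env:dev', 'pod:web-1'], None)]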
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def count(self, counter, delta):\n try:\n self._thread_pool_executor.submit(self._delegate.count, counter, delta)\n except:\n self._logger.exception('Exception caught submitting count metric')", "def inc(self, labels: dict[str, str]):\n\n val = self.get(labels)\n\n if val is None:\n val = 0\n\n val += 1\n\n self.set(labels, val)", "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)", "def _increment_counter(metric: str):\n if metric not in db:\n db[metric] = 0\n db[metric] += 1", "def record_count(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"count\")\n with self._lock:\n self._batch[identity] = self._batch.get(identity, 0) + value", "def sendUpStatCountTagCounts(node, tag):\n def pushUp(node):\n t = 0\n ta = 0\n for child in node.children:\n tc, tac = pushUp(child)\n ta += tac\n t += tc\n node.tagTranscriptAnnotations += ta\n node.tagTranscripts += t\n return node.tagTranscripts, node.tagTranscriptAnnotations\n if ':' in tag:\n tag = tag.split(':')[-1]\n pushUp(node)", "def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def hit(self, label=None):\n self.labels[label] += 1", "def count(self, key):\n self._metrics[key] += 1", "def add_word_tag(self, token, label):\n # Add total count for label\n self.label_counts[label] += 1\n # Add count for word given label\n if token not in self.words_labels_counts[label]:\n self.words_labels_counts[label][token] = 1\n else:\n self.words_labels_counts[label][token] += 1", "def add_counter(self, data, metric_id=None):\n self._post_data(prefix_id='counters', data=data, metric_id=metric_id)", "def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. 
count_metrics = %s',\n build_metrics_counter_data(count_metrics))", "def count(self, counter, delta):\n if self.ignore_metrics:\n return\n\n with self._count_rlock:\n self._count_metrics[counter] += delta\n self._count_call_count += 1\n\n old_call_time = self._count_last_call_time\n self._count_last_call_time = arrow.utcnow().timestamp\n if (self._count_call_count == self._max_call_count > 0) or \\\n self._count_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._count_call_count = 0\n self.update_count()", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def emit_counter(self, category, name, pid, timestamp, counter, value):\n event = self._create_event('C', category, name, pid, 0, timestamp)\n event['args'] = {counter: value}\n self._events.append(event)", "def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def counter(self, metric_name, value=1):\n if self._send_sampled_event():\n counter = \"%s%s:%d|c|@%s\" % (self.metric_name_prepend, metric_name,\n value, self.statsd_sample_rate)\n self._send_events([counter])", "def m2m_changed_metrics(sender, **kwargs):\r\n if 'action' not in kwargs:\r\n return\r\n\r\n action = {\r\n 'post_add': 'm2m.added',\r\n 'post_remove': 'm2m.removed',\r\n 'post_clear': 'm2m.cleared',\r\n }.get(kwargs['action'])\r\n\r\n if not action:\r\n return\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n\r\n if 'model' in kwargs:\r\n tags.append('target_class:{}'.format(kwargs['model'].__name__))\r\n\r\n pk_set = kwargs.get('pk_set', []) or []\r\n\r\n dog_stats_api.increment(\r\n 'edxapp.db.model',\r\n value=len(pk_set),\r\n tags=tags\r\n )", "def do_counter(parser, token):\n try:\n tag_name, reset = token.contents.split(None, 1)\n except ValueError:\n reset = False\n else:\n if reset == 'reset':\n reset = True\n return CounterNode(reset)", "def incr(\n self,\n stat: str,\n count: int = 1,\n rate: float = 1,\n tags: Attributes = None,\n ):\n if _skip_due_to_rate(rate):\n return\n if count < 0:\n raise ValueError(\"count must be a positive value.\")\n\n if self.metrics_validator.test(stat) and name_is_otel_safe(self.prefix, stat):\n counter = self.metrics_map.get_counter(full_name(prefix=self.prefix, name=stat), attributes=tags)\n counter.add(count, attributes=tags)\n return counter", "def post_delete_metrics(sender, **kwargs):\r\n tags = _database_tags('deleted', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, 
error=error, message=message, metric_id=metric_id, tag_id=tag_id)", "def metric(self, slug, num=1, category=None, expire=None, date=None):\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n if category:\n self._categorize(slug, category)\n\n # Increment keys. NOTE: current redis-py (2.7.2) doesn't include an\n # incrby method; .incr accepts a second ``amount`` parameter.\n keys = self._build_keys(slug, date=date)\n\n # Use a pipeline to speed up incrementing multiple keys\n pipe = self.r.pipeline()\n for key in keys:\n pipe.incr(key, num)\n if expire:\n pipe.expire(key, expire)\n pipe.execute()", "def increment_metric_counter(metric_name, redis_db):\n if TEST_MODE:\n print 'Simulate redis incremet, key is %s' % metric_name\n return\n if redis_db:\n try:\n redis_db.incr(metric_name)\n except Exception as e:\n logger.warning(\"Failed to increment redis metric '%s' \"\n \"with exception '%s'\", metric_name, e)", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "async def test_counter(client, counter_entities) -> None:\n body = await generate_latest_metrics(client)\n\n assert (\n 'counter_value{domain=\"counter\",'\n 'entity=\"counter.counter\",'\n 'friendly_name=\"None\"} 2.0' in body\n )", "def test_counter(self):\n # Create a metrics with no metric instances\n mf = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n [],\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(len(mf.metric), 0)\n\n # Create it with metrics\n mf = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(mf.name, self.counter_metric_name)\n self.assertEqual(mf.help, self.counter_metric_help)\n self.assertEqual(mf.type, self.counter_metric_type)\n\n # Create another and check equal\n mf_ = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n self.assertEqual(mf, mf_)\n\n for m in mf_.metric:\n self.assertEqual(m.timestamp_ms, 0)\n\n # Create another with timestamp\n mf_ = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n timestamp=True,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n for m in mf_.metric:\n self.assertNotEqual(m.timestamp_ms, 0)\n\n self.assertNotEqual(mf, mf_)\n\n # Create Counter with const_labels\n mf_ = pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n self.counter_metric_type,\n self.counter_metric_data,\n const_labels=self.const_labels,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n # Check that const_label is present in the LabelPair associated\n # with each metric instance.\n for m in mf_.metric:\n labels = [lp.name for lp in m.label]\n self.assertIn(\"app\", labels)\n\n self.assertNotEqual(mf, mf_)\n\n # Check Counter can be round-tripped through encode and decode\n payload = pmp.encode(mf)\n self.assertIsInstance(payload, bytes)\n _mf = pmp.decode(payload)[0]\n self.assertEqual(mf, _mf)", "def process_counter_event(\n self,\n name: str,\n categories: Union[List[str], Tuple[str, ...]],\n timestamp: Timestamp,\n 
wall_clock_time_ns: int,\n values: Dict[str, Union[int, float]],\n ) -> None:\n del name, categories, timestamp, wall_clock_time_ns, values # unused", "def _tally_votes(self, labels, distances):\n votes = collections.defaultdict(int)\n for i, index in enumerate(distances.order(ascending=True).index):\n if i < self.k:\n votes[labels[index]] += 1\n else:\n break\n return votes", "def CreatePodMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})", "def count(self, counter, delta):\n pass # Do nothing", "def process(self, key, value):\n if key not in self.counts:\n self.counts[key] = 0.0\n self.counts[key] += value", "def update_max_counts(self, label, nvals):\n if label not in self.maxcounts:\n if self.verb > 1:\n print('** found new label key: %s' % label)\n self.maxcounts[label] = nvals\n\n else: # rcr - safe as one line? will it be parsed?\n if nvals > self.maxcounts[label]: self.maxcounts[label] = nvals\n\n self.subjcounts[label] += 1", "def increment_counter(self) -> None:", "def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = Counter(tagged_lines)\n return tag_counts", "def update_count(self):\n try:\n self._thread_pool_executor.submit(self._update_count_fn)\n except:\n self._logger.exception('Exception caught submitting count metrics update task.')", "def add_count_data(self, counts: Dict[datetime, int]):\n raise NotImplementedError()", "def test_add_tag_to_derived_metric(self):\n pass", "def metrics_group():", "def update_metrics(self, metrics, predictions, labels):\n return", "def get_corpus_counts(x,y,label):\n raise NotImplementedError", "def count_total_tags():\r\n total = TagMgr.count()\r\n stat = StatBookmark(attrib=TAG_CT, data=total)\r\n DBSession.add(stat)", "def count_tags():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_tags()\r\n trans.commit()", "def build_metrics_counter_data(count_metrics):\n return [{'name': name, 'delta': delta} for name, delta in iteritems(count_metrics)]", "def get_tag_counts(label_matches):\r\n\ttag_counts = {}\r\n\tfor word_and_tag in label_matches.keys():\r\n\t\tcurrent_count = tag_counts.get(word_and_tag[_TAG], 0)\r\n\t\ttag_counts[word_and_tag[_TAG]] = current_count+1\r\n\treturn tag_counts", "def _increment_viewcount(model, model_id: int, request):\n object_key = model.__name__ + ':' + str(model_id)\n\n redis = get_redis_connection('traffic_stats')\n view_count = redis.get(object_key)\n if not view_count:\n # Cache miss. Get the view count from the database and cache it.\n try:\n view_count = int(model.objects.get(identifier=model_id).view_count)\n except ObjectDoesNotExist:\n # If the object doesn't even exist in the database, don't track it.\n return\n except FieldDoesNotExist:\n log.error(\n 'Cannot track model {} because it has no view_count field. 
'\n 'Views for this model will be lost.'.format(model.__name__)\n )\n return -1\n redis.set(object_key, view_count)\n else:\n view_count = int(view_count)\n\n # Only increment the view count if the user has not visited the resource in\n # the last few minutes. Prevents metrics gaming shenanigans.\n ip = _get_user_ip(request)\n if not _is_recent_visitor(ip, object_key):\n redis.incr(object_key)\n view_count += 1\n _mark_recent_visitor(ip, object_key)\n\n # Update the last access time of the model.\n # Store in a sorted set so we can easily find the oldest keys.\n timestamp = time.time()\n redis.execute_command(\n 'ZADD model-last-accessed {} {}'.format(timestamp, object_key)\n )\n return view_count", "def rename_labels_by_count(labels):\n new_labels, label_counts = _count_labels(labels)\n\n return new_labels", "def metrics_counters(self, counters_data):\n url = _METRICS_URL_TEMPLATE.format(base_url=self._events_api_url_base, endpoint='counters')\n return self._post(url, counters_data)", "def inc(self, amount=1):\n if amount < 0:\n raise ValueError('Counters can only be incremented by non-negative amounts.')\n self._shared_list.append((self._labels_args, ('inc', amount)))", "def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def inc(self, value: Union[int, float] = 1.0, tags: Dict[str, str] = None):\n _add_serve_context_tag_values(self._tag_keys, tags)\n super().inc(value, tags)", "def count_target(self):\n tally = {}\n for obj in self.target:\n tally[obj] = 0\n\n ind = 0\n for label in self.labelList:\n filename = self.pathLabel + label\n f = open(filename, 'r')\n content = f.read().split('\\n')\n for line in content:\n items = line.split(' ')\n if items[0] in self.target:\n tally[items[0]] += 1\n f.close()\n if ind % 100 == 0:\n print(f'[COUNT] {ind} of {len(self.labelList)} processed')\n ind += 1\n \n print('[COUNT] done counting targets in dataset')\n print(tally)", "def encoding_labelcount(df, target=None):\n if not target:\n target = ['user_id', 'title']\n\n norm = round(\n df.shape[0] / 10000) # normalize the count by /per 100000 entries\n for col in target:\n df[col + '_labelcount'] = df[col].map(df[col].value_counts()) / norm\n df.drop([col], axis=1, inplace=True)\n return None", "def stat_counter(self, key: str, value: StatCounterDataPoint, dimensions: Dict[str, str] = None):\n self._results_builder.add_absolute_stat_counter_result(\n PluginMeasurementStatCounter(key=key, value=value, dimensions=dimensions,\n entity_selector=self.selector))", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def _get_counter(metric: str) -> int:\n if metric not in db:\n db[metric] = 0\n return db[metric]", "def _increment(cls, counter_name: str, counter_category: str = None) -> int:\n counter_key = {\n \"_id\": counter_category if counter_category else cls.__collection__.name\n }\n counter_update = {\n \"$inc\": {f\"{counter_name}.counter\": 1},\n \"$set\": {f\"{counter_name}.last_update_time\": datetime.datetime.utcnow()},\n }\n counter_element = cls.__counters__.find_one_and_update(\n counter_key,\n counter_update,\n return_document=pymongo.ReturnDocument.AFTER,\n upsert=True,\n )\n return 
counter_element[counter_name][\"counter\"]", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def _tally(self, user_gpio, level, tick):\n self.count += 1", "def count_nodes(self, term=None, labels: istr = None):", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def increment(self, count_name):\n prop_name = 'count_' + count_name\n setattr(self, prop_name, getattr(self, prop_name, 0) + 1)", "def record_summary(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"summary\")\n with self._lock:\n if identity in self._batch:\n merged_value = self._batch[identity]\n merged_value[\"count\"] += 1\n merged_value[\"sum\"] += value\n merged_value[\"min\"] = min(value, merged_value[\"min\"])\n merged_value[\"max\"] = max(value, merged_value[\"max\"])\n else:\n value = {\"count\": 1, \"sum\": value, \"min\": value, \"max\": value}\n self._batch[identity] = value", "def increment_count(self, word):\n pass", "def update_counter(self, counter, entity):", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def count(self, val):\n raise ValueError('cannot set \\'count\\' in class KeyTracker')", "def update_counters(counter: dict[str, int], new_counts: dict[str, int]) -> dict[str, int]:\n for (name, count) in new_counts.items():\n counter[name] += count\n return counter", "def sum(self, key, value):\n self._metrics[key] += value", "def add_counter_deployment(self, data, feed_id, server_id, resource_id, metric_enum):\n metric_id = self._metric_id_counter_deployment(feed_id=feed_id, server_id=server_id,\n resource_id=resource_id,\n metric_enum=metric_enum)\n self.add_counter(data=data, metric_id=metric_id)", "def _get_metric_count(cls, metric, variant, next=True):\n counters = cls._metric_counters\n key = '%s_%s' % (metric, variant)\n try:\n cls._metric_counters_lock.acquire()\n value = counters.get(key, -1)\n if next:\n value = counters[key] = value + 1\n return value\n finally:\n cls._metric_counters_lock.release()", "def increment(name, count=1):\n # check the counter is tracked\n if name not in _counter_cache:\n track_counter(name)\n _counter_cache.add(name)\n print 'increment: %s' % name\n memcache.incr(name, delta=count, initial_value=0, namespace=NAMESPACE)", "def metrics(self, metrics):\n\n self._metrics = metrics", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def count_tags(tags):\n counts = {}\n for tag_list in tags.values():\n for tag in tag_list:\n if tag in counts:\n counts[tag] += 1\n else:\n counts[tag] = 1\n return counts", "def aggregate_tags_count(new_values, total_sum):\n if total_sum is None:\n total_sum = 0\n return sum(new_values) + total_sum", "async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current 
count {count}')", "def count_by_tag(self, dataframe, tags):\r\n if tags and not dataframe['tags'].empty:\r\n data_to_return = []\r\n counter = 0\r\n for tag in tags:\r\n for datafield in dataframe['tags']:\r\n if tag in datafield:\r\n counter += 1\r\n data_to_return.append([tag, counter])\r\n counter = 0\r\n return pandas.DataFrame(data_to_return, columns=('TAG', 'TagCount'))", "def merge_token_counters(\n token_counter1: Dict[str, int], token_counter2: Dict[str, int]\n ) -> Dict[str, int]:\n for token, number in token_counter2.items():\n if token in token_counter1:\n token_counter1[token] += number\n else:\n token_counter1[token] = number\n\n return token_counter1", "def count_nodes(self, term=None, labels: istr = None) -> int:", "def __cross_wiki_counts(self):\n\n print(\"Updating counts by merging with CrossWiki\")\n\n cnt = 0\n crosswiki_path = os.path.join(\n self.base_url, \"generic/p_e_m_data/crosswikis_p_e_m.txt\"\n )\n\n with open(crosswiki_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n parts = line.split(\"\\t\")\n mention = unquote(parts[0])\n\n if (\"Wikipedia\" not in mention) and (\"wikipedia\" not in mention):\n if mention not in self.wiki_freq:\n self.wiki_freq[mention] = {}\n\n num_ents = len(parts)\n for i in range(2, num_ents):\n ent_str = parts[i].split(\",\")\n ent_wiki_id = int(ent_str[0])\n freq_ent = int(ent_str[1])\n\n if (\n ent_wiki_id\n not in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n ent_name_re = self.wikipedia.wiki_redirect_id(ent_wiki_id)\n if (\n ent_name_re\n in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]\n ):\n ent_wiki_id = self.wikipedia.wiki_id_name_map[\n \"ent_name_to_id\"\n ][ent_name_re]\n\n cnt += 1\n if (\n ent_wiki_id\n in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n if mention not in self.mention_freq:\n self.mention_freq[mention] = 0\n self.mention_freq[mention] += freq_ent\n\n ent_name = self.wikipedia.wiki_id_name_map[\n \"ent_id_to_name\"\n ][ent_wiki_id].replace(\" \", \"_\")\n if ent_name not in self.wiki_freq[mention]:\n self.wiki_freq[mention][ent_name] = 0\n self.wiki_freq[mention][ent_name] += freq_ent", "def inc_counter(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def count(context, namespace_name, session):\n namespace = namespace_api.get(context, namespace_name, session)\n query = (session.query(func.count(models.MetadefTag.id)).filter_by(\n namespace_id=namespace['id']))\n return query.scalar()", "def update_count(self):\n pass # Do nothing", "def test_task_count_tags(self):\r\n tasks.count_tags()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TAG_CT)\r\n self.assertEqual(stat.data, 4)", "def _increment_token_weight(self, weights: {str: int}, token=None, tag=None, weight=1) -> None:\n if tag:\n for node in self.soup.find_all(tag):\n for token in re.findall(\"[a-zA-Z\\d]+\", node.get_text()):\n weights[token] += weight\n elif token:\n weights[token] += weight", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def update_count(self):\n pass", "async def count(self, ctx):\n #Small chance for the count to appear\n rando = randint(0, 1000)\n if rando == (0):\n await StatsTracker.updateStat(self, \"achievements\", ctx.message.author.id, \"Summoned The Count\")\n await self.bot.say(\n \"You have been visited by The Count. 
He only visits once in every 1,000 counts! Congratulations! http://vignette3.wikia.nocookie.net/muppet/images/3/3c/CT-p0001-ST.jpg/revision/latest?cb=20060205225316\")\n \n \n #Increment count\n self.counter = self.counter + 1\n \n \n #Calculate how long it has been since last write\n timeSinceWrite = (time.time() - self.lastWrite)\n #If write is necessary, write to file and update lastWrite time\n if (timeSinceWrite >= 60*1):\n countFile = open(\"data/counter/counter.txt\", \"w\")\n countFile.write(str(self.counter))\n countFile.close()\n self.lastWrite = time.time()\n #Print out current count number\n await self.bot.say(self.counter)\n\n #Write to stats\n await StatsTracker.updateStat(self, \"commands\", ctx, ctx.message.content[1:])", "def on_text(self, event):\n self.get_counts()\n self.save()", "def update(self, qid, tags):\n for tag in tags:\n self.D[qid][tag] += 1\n self.N += 1", "def count(self, tokens):\n return self.counts[tokens]", "def add_count(self):\n self.count += 1", "def aggregateCounts(self, timestamps, op = OP_TOTAL,timeResolution = None ):\n timeResolution = timeResolution or self.timeResolutions[0]\n\n if op not in (BitmapCounter.OP_INTERESECT, BitmapCounter.OP_TOTAL, BitmapCounter.OP_AVG):\n raise ValueError(\"Invalid aggregation op %s\" % op)\n\n if op == BitmapCounter.OP_INTERESECT:\n bitop = 'AND'\n else:\n bitop = 'OR'\n\n dest = 'aggregate:%s:%s' % (self.metric,hash(timestamps))\n pipe = self._getPipeline()\n pipe.execute_command('BITOP',bitop, dest, *(self.__getKey(timestamp, timeResolution) for timestamp in timestamps))\n pipe.execute_command('BITCOUNT', dest)\n rx = pipe.execute()\n ret = rx[1]\n if op == BitmapCounter.OP_AVG:\n return float(ret)/len(timestamps)\n else:\n return ret" ]
[ "0.7103015", "0.62737775", "0.60384905", "0.60204303", "0.59881437", "0.58371973", "0.5602605", "0.55809146", "0.5569244", "0.5481773", "0.5438321", "0.5368393", "0.53387356", "0.53331786", "0.5259395", "0.5257974", "0.52503383", "0.52373564", "0.52249354", "0.5209548", "0.5200397", "0.51959217", "0.51663995", "0.5146076", "0.51287234", "0.50925756", "0.5084451", "0.50807995", "0.5048736", "0.5044877", "0.5044836", "0.50229186", "0.5021091", "0.5017482", "0.50150436", "0.5009255", "0.4991467", "0.498522", "0.48910114", "0.48864272", "0.4881675", "0.4877404", "0.48749593", "0.4872899", "0.48704222", "0.4864626", "0.48557225", "0.48421878", "0.4836392", "0.48335016", "0.4818455", "0.4814703", "0.48110375", "0.4810668", "0.47883373", "0.4784786", "0.47818542", "0.47805986", "0.4779807", "0.4770928", "0.47694436", "0.47616297", "0.47596765", "0.47573772", "0.47560278", "0.475096", "0.47276717", "0.4720478", "0.47190842", "0.47157148", "0.4714125", "0.47055984", "0.46995544", "0.4699045", "0.4688458", "0.4687521", "0.46866667", "0.46492332", "0.46394435", "0.46393967", "0.4635764", "0.46333545", "0.4630356", "0.46275386", "0.46219674", "0.46151745", "0.459963", "0.45961365", "0.4585375", "0.45848593", "0.4574509", "0.45702314", "0.45702314", "0.4567598", "0.45662737", "0.4564761", "0.45606774", "0.4558345", "0.4557238", "0.4545268" ]
0.75249547
0
Override so human players are blue by default
def __init__(self, playerIndex, colour="blue"):\n        super().__init__(playerIndex, colour)
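A small runnable sketch of the override above, assuming a simple Player base class (illustrative only — the real base class is not shown in this row): the subclass changes nothing but the default colour.

# Sketch: a base Player with a configurable colour, and a HumanPlayer that
# overrides only the default so human players are blue unless told otherwise.
class Player:
    def __init__(self, playerIndex, colour="red"):
        self.playerIndex = playerIndex
        self.colour = colour


class HumanPlayer(Player):
    def __init__(self, playerIndex, colour="blue"):
        super().__init__(playerIndex, colour)


print(HumanPlayer(0).colour)           # blue (default overridden)
print(HumanPlayer(1, "green").colour)  # green (an explicit colour still wins)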
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_if_bottom_color_player_well_set(self):\n ui = UIRender(TestUI.image_path)\n ui.set_bottom_player_color(CELTIC_GREEN)\n self.assertEqual(ui.bottom_player_color, CELTIC_GREEN)\n self.assertEqual(ui.top_player_color, SPQR_RED)\n ui.set_bottom_player_color(SPQR_RED)\n self.assertEqual(ui.bottom_player_color, SPQR_RED)\n self.assertEqual(ui.top_player_color, CELTIC_GREEN)", "def player(self):\n return self._color", "def __init__(self, colour):\n self.colour = colour\n self.name = \"Player\"", "def get_blue():\n # return name of actor, grazing speed, self defense\n return 'Piggy', 2", "def updatePlayer(self, _player):\n if _player.color == 'black': self.players['black'] = _player\n else: self.players['white'] = _player", "def switch_player(self):\n if self.playerOne:\n # sets the chip color to blue\n self.red = 0\n self.blue = 255\n # switch the player to player 2 and change the caption\n self.playerOne = False\n pygame.display.set_caption('Connect4 - Player 2')\n else:\n # sets the chip color to red\n self.red = 250\n self.blue = 0\n # switch the player to player 1 and change the caption\n self.playerOne = True\n pygame.display.set_caption('Connect4 - Player 1')", "def test_it_is_white_by_default():\n rob = Unicorn('Robert')\n assert rob.color == 'White'", "def opponent(self, player):\r\n # player = core.BLACK (can do this for any static var)\r\n if player == core.BLACK:\r\n return core.WHITE\r\n else:\r\n return core.BLACK", "def update_player_turn(self):\n\n if self.get_player_turn() != 'BLUE':\n\n self._player_turn = 'BLUE'\n\n else:\n\n self._player_turn = 'RED'", "def player_highlight(self):\n if self.turn % 2 == 0:\n self.Player2_Label.configure(relief='flat')\n self.Player1_Label.configure(relief='groove')\n else:\n self.Player1_Label.configure(relief='flat')\n self.Player2_Label.configure(relief='groove')", "def happy_color(health):\n if health > 0.8:\n return 'g'\n if health > 0.6:\n return 'y'\n return 'r'", "def _style_colours(self):\n\n pass", "def get_color(self):\n return \"yellow\"", "def draw_colored_player(self, id):\n if id == self.id:\n pygame.draw.rect(self.screen, self.color_1, pygame.Rect(self.first_player_x, self.first_player_y, 20, 140))\n else:\n pygame.draw.rect(self.screen, self.color_2, pygame.Rect(self.second_player_x, self.second_player_y, 20, 140))\n return", "def draw_players(self):\n if self.player_is_skin:\n self.draw_skin_player(self.id)\n else:\n self.draw_colored_player(self.id)\n\n if self.opp_is_skin:\n self.draw_skin_player(3)\n else:\n self.draw_colored_player(3)\n return", "def getHealthColor(player):\r\n \r\n if player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 25):\r\n hpcolor = LRED\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 50):\r\n hpcolor = YELLOW\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 75):\r\n hpcolor = LGREEN\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 85):\r\n hpcolor = WHITE\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 95):\r\n hpcolor = WHITE\r\n else:\r\n hpcolor = WHITE\r\n \r\n # If player.hp is higher than maxhp, make it blue (only a buff can do this)\r\n if player.stats[HP] > player.stats[MAXHP]:\r\n hpcolor = BLUE \r\n \r\n return hpcolor", "def __init__(self, name, hunger, color=\"Green\"):\r\n super().__init__(name, hunger)\r\n self._color = color", "def __init__(self):\n self.red = 0\n self.black = 0", "def opponent(player):\n return BLACK if player is WHITE else WHITE", "def get_color(self):\r\n if self.color:\r\n return 
\"RED\"\r\n else:\r\n return \"BLACK\"", "def on_collision(self):\n self.car_color = arcade.color.RED_DEVIL", "def bet_on_color(self, color: bool, user_seed: str = '') -> None:\n self._bet_type.set(BET_TYPES[2])\n if color:\n numbers = WHEEL_RED\n else:\n numbers = WHEEL_BLACK\n self.__bet(numbers, user_seed)", "def red_car(self):\n self.color = \"red\"\n self.wheels = 4", "def get_opponent_color(self, mycolor):\n if mycolor == ChessGame.BLACK:\n return ChessGame.WHITE\n elif mycolor == ChessGame.WHITE:\n return ChessGame.BLACK\n else:\n raise NotImplementedError()", "def UseColor(self, use_color):\n self.use_color = use_color", "def expose_bomb(self):\n self['bg'] = 'red'\n self['text'] = '*'", "def get_player_disc_colour(self, name):\n player_number = self.game[\"players\"].index(name)\n return self.player_discs[player_number]", "def __set_colors(self, players):\n\n colors = set()\n for p in players:\n if p.get_color() is None:\n continue\n colors.add(p.get_color())\n if len(colors) != 0 and len(colors) != len(players):\n raise ValueError(\"Each player does not have a unique assigned color.\")\n \n if len(colors) == 0:\n for i, p in enumerate(players):\n p.set_color(BoardPlayer.POSSIBLE_COLORS[i])", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def display(self, color = (190,205,205), add = False):\r\n\t\tpass", "def on_show(self): \n arcade.set_background_color(arcade.color.BLACK)", "def blue(self) -> float:\n return self._blue", "def get_trump_color(self):\n return random.choice(Card.colors[1:])", "def __init__(self):\n self.colors = (\n 'BLACK', 'RED', 'GREEN', 'YELLOW',\n 'BLUE', 'MAGENTA', 'CYAN', 'WHITE'\n )\n\n self.disable_color = True\n\n if sys.stdout.isatty():\n self.disable_color = False", "def piece_color(self, piece):\n if piece == None:\n return None\n if ord(ChessPiece.W_KING) <= ord(piece) <= ord(ChessPiece.W_PAWN):\n return \"white\"\n return \"black\"", "def test_change_color_of_the_device__false():", "def __init__(self, red, green, blue, alpha = 255):\n self._red = red\n self._green = green\n self._blue = blue \n self._alpha = alpha", "def __init__(self, device: SensemeDevice) -> None:\n super().__init__(device, device.name)\n self._attr_supported_color_modes = {ColorMode.BRIGHTNESS}\n self._attr_color_mode = ColorMode.BRIGHTNESS", "def setUp(self):\r\n self.black = Color('black', (0, 0, 0))\r\n self.red = Color('red', (255, 0, 0))\r\n self.pink = Color('pink', (100, 0, 0))\r\n self.green = Color('green', (0, 255, 0))", "def get_opponent_color(self, self_color):\r\n return abs(self_color - 1)", "def disable_color(self):\n self.disable_color = True", "def test_change_color_of_the_device__true():", "def set_color(self, color):\n\t\tpass", "def play_human_custom(env):\n play_human(env)", "def play(self, player, game): \n super().play(player, game)\n game.set_action(\"SLEEP_CODER\")", "def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'", "def getTurn(self):\r\n return self.players[self.getCurrentPlayer()].getColor()", "def get_trump_color(self):\n color_counter = collections.Counter()\n for card in self.hand:\n color = card.color\n if color == \"White\":\n continue\n color_counter[color] += 1\n 
if not color_counter.most_common(1):\n return super().get_trump_color()\n else:\n return color_counter.most_common(1)[0][0]", "def change_color():\n return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)", "def __init__(self, player):\n \n self.colour = player\n self.game_in_head = Board()", "def use_black_text(self): \n black_foreground = 0\n for color in range(curses.COLORS):\n curses.init_pair(color, black_foreground, color)", "def color(piece):\n return Color.BLACK if piece in {Piece.BP, Piece.BN, Piece.BB, Piece.BR, Piece.BQ, Piece.BK} else Color.WHITE", "def ShouldInheritColours(self):\r\n\r\n return False", "def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down", "def bark(self):\n print(f\"{self.name} is now barking\")", "def updateTrouserColor(self, trouserColor): \n self.avatarConfiguration[\"trousers\"] = str(trouserColor)\n self.paintTrousers()", "def color(self):\n return self.COLOR", "def get_palace_board_blue(self):\n\n return self._palace_board_blue", "def on_key_press(self, symbol, modifiers):\n\t\tif symbol == key._1:\n\t\t\tself.colour = \"red\"\n\t\telif symbol == key._2:\n\t\t\tself.colour = \"yellow\"\n\t\telif symbol == key._3:\n\t\t\tself.colour = \"lightblue\"\n\t\telif symbol == key._4:\n\t\t\tself.colour == \"green\"\n\t\telif symbol == key._5:\n\t\t\tself.colour == \"pink\"\n\t\telif symbol == key._6:\n\t\t\tself.color == \"purple\"\n\t\telif symbol == key._7:\n\t\t\tself.colour == \"navy\"\n\t\telif symbol == key._8:\n\t\t\tself.colour == \"orange\"\n\t\t#If player has chosent their colour\n\t\telif symbol == key.ENTER or symbol == key.RETURN:\n\t\t\t#If there are more players left\n\t\t\tif self.players_count > 1:\n\t\t\t\t#Get player details and place in list player\n\t\t\t\tplayer = self.player_details[self.players - self.players_count]\n\t\t\t\t#Add colour to the players details\n\t\t\t\tplayer.append(self.colour)\n\t\t\t\t#Place the player list back in the player details\n\t\t\t\tself.player_details[self.players - self.players_count] = player\n\t\t\t\t#Reset the temporary colour holder\n\t\t\t\tself.colour = \"\"\n\t\t\t\t#Decrement the numbers of players left\n\t\t\t\tself.players_count -= 1\n\t\t\t\t#Remove the text label\n\t\t\t\tself.labels.pop(1)\n\t\t\t\t#Reset the text label with the next username\n\t\t\t\tself.text_label = pyglet.text.Label(\"{} choose a colour:\".format(self.player_details[(self.players - self.players_count)][0].capitalize()),\n \tfont_name='Times New Roman',\n \tfont_size=36,\n \tx=self.width//2, y=self.height//2,\n \tanchor_x='center', anchor_y='center', color=(0, 0, 0, 255))\n\t\t\t\t#Place the label back into the labels list in the same position\n\t\t\t\ttmp_label = [self.labels[0]]\n\t\t\t\ttmp_label.append(self.text_label)\n\t\t\t\ttmp_label += self.labels[1:]\n\t\t\t\tself.labels = tmp_label\n\t\t\t\tself.render()\n\t\t\t#We are at the last player\n\t\t\telse:\n\t\t\t\t#Get player details and place in list player\n\t\t\t\tplayer = self.player_details[self.players - self.players_count]\n\t\t\t\t#Add colour to the players details\n\t\t\t\tplayer.append(self.colour)\n\t\t\t\t#Place the player list back in the player details\n\t\t\t\tself.player_details[self.players - self.players_count] = player\n\t\t\t\t#Set up game\n\t\t\t\tg = self.game_setup()\n\t\t\t\t#Close the window\n\t\t\t\tpyglet.clock.schedule_once(self.exit_callback , 2)\n\t\t\t\t#Start the game\n\t\t\t\tstartgamewindow(g)", "def __init__(self, 
label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[-1,0],[1,0],[0,-1]],\\\n 'palace':[[-1,-1],[1,-1]]}\\\n })", "def winner(self, black, white):\n fill(255)\n rect(150, 150, 150, 80, 7)\n fill(0)\n textSize(20)\n if self.tie:\n text(\"It's Tie\", 160, 180)\n elif self.black_wins:\n text(\"Black WINS\", 160, 180)\n elif self.white_wins:\n text(\"White WINS\", 160, 180)\n result = \"black: \" + str(black)\n text(result, 160, 200)\n result = \"white: \" + str(white)\n text(result, 160, 220)", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def player_scored(self,\n player: ba.Player,\n base_points: int = 1,\n target: Sequence[float] = None,\n kill: bool = False,\n victim_player: ba.Player = None,\n scale: float = 1.0,\n color: Sequence[float] = None,\n title: Union[str, ba.Lstr] = None,\n screenmessage: bool = True,\n display: bool = True,\n importance: int = 1,\n showpoints: bool = True,\n big_message: bool = False) -> int:\n # FIXME: Tidy this up.\n # pylint: disable=cyclic-import\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-statements\n from bastd.actor.popuptext import PopupText\n from ba import _math\n from ba._gameactivity import GameActivity\n from ba._lang import Lstr\n del victim_player # Currently unused.\n name = player.getname()\n s_player = self._player_records[name]\n\n if kill:\n s_player.submit_kill(showpoints=showpoints)\n\n display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)\n\n if color is not None:\n display_color = color\n elif importance != 1:\n display_color = (1.0, 1.0, 0.4, 1.0)\n points = base_points\n\n # If they want a big announcement, throw a zoom-text up there.\n if display and big_message:\n try:\n assert self._activity is not None\n activity = self._activity()\n if isinstance(activity, GameActivity):\n name_full = player.getname(full=True, icon=False)\n activity.show_zoom_message(\n Lstr(resource='nameScoresText',\n subs=[('${NAME}', name_full)]),\n color=_math.normalized_color(player.team.color))\n except Exception:\n print_exception('error showing big_message')\n\n # If we currently have a actor, pop up a score over it.\n if display and showpoints:\n our_pos = player.node.position if player.node else None\n if our_pos is not None:\n if target is None:\n target = our_pos\n\n # If display-pos is *way* lower than us, raise it up\n # (so we can still see scores from dudes that fell off cliffs).\n display_pos = (target[0], max(target[1], our_pos[1] - 2.0),\n min(target[2], our_pos[2] + 2.0))\n activity = self.getactivity()\n if activity is not None:\n if title is not None:\n sval = Lstr(value='+${A} ${B}',\n subs=[('${A}', str(points)),\n ('${B}', title)])\n else:\n sval = Lstr(value='+${A}',\n subs=[('${A}', str(points))])\n PopupText(sval,\n color=display_color,\n scale=1.2 * scale,\n position=display_pos).autoretain()\n\n # Tally kills.\n if kill:\n s_player.accum_kill_count += 1\n s_player.kill_count += 1\n\n # Report non-kill scorings.\n try:\n if screenmessage and not kill:\n _ba.screenmessage(Lstr(resource='nameScoresText',\n subs=[('${NAME}', name)]),\n top=True,\n color=player.color,\n image=player.get_icon())\n except Exception:\n print_exception('error announcing score')\n\n s_player.score += points\n s_player.accumscore += points\n\n # Inform a running game of the score.\n if points != 0:\n activity = 
self._activity() if self._activity is not None else None\n if activity is not None:\n activity.handlemessage(PlayerScoredMessage(score=points))\n\n return points", "def __init__(self, red=Black.red, green=Black.green, blue=Black.blue):\n self.color = Color(red, green, blue)\n\n self.template = '\\ttextcolor = {textcolor};\\n'", "def setRandomColor():\n setColor(getRandomColor())", "def setForeground(self, color = None):", "def _get_red(self):\n return self.__red", "def _get_red(self):\n return self.__red", "def _get_red(self):\n return self.__red", "def update(self):\n if self.pinky_wins:\n fill(1)\n textSize(50)\n text(\"PINKY WINS\", self.WIDTH/2 - 140, self.HEIGHT/2)\n if self.player_wins:\n fill(1)\n textSize(50)\n text(\"YOU WIN!!!\", self.WIDTH/2 - 140, self.HEIGHT/2)", "def change_color(self, color):\r\n if color == \"black\":\r\n self.color = \"white\"\r\n self.canvas.itemconfig(self.ball, fill='white')\r\n else:\r\n self.color = \"black\"\r\n self.canvas.itemconfig(self.ball, fill='black')", "def battle_resting(self):\n pass", "def heart_status(self):\r\n return 'pumping blood'", "def kill(self):\n self.is_dead = True\n self.color = (255, 0, 0)# Turn RED when dead", "def GetDefaultColor(self):\n return wx.BLACK", "def SetBlue(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUC_SetBlue(self, *args)", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def randomcolour(self):\n r = random.randrange(1, 255)\n g = random.randrange(1, 255)\n b = random.randrange(1, 255)\n self.colour((r,g,b))", "def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def test_issue_269(self):\n\n c = pygame.Color(0)\n c.hsva = 360, 0, 0, 0\n self.assertEqual(c.hsva, (0, 0, 0, 0))\n c.hsva = 360, 100, 100, 100\n self.assertEqual(c.hsva, (0, 100, 100, 100))\n self.assertEqual(c, (255, 0, 0, 255))", "def change_log_success_status(self, value):\n if value:\n self.embed.colour = 3066993 # Discord color format\n elif value is False:\n self.embed.colour = 15158332\n elif not value:\n self.embed.colour = 15105570", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def setMyBackground(self):\n base.setBackgroundColor(globals.colors['guiblue4'])", "def switch_color(color):\n return \"b\" if color == \"w\" else \"w\"", "def change_Shield_Color (im, newcolor, GRAPHICSMODE=\"PIL\"):\r\n DEBUG = 
0\r\n #DEBUG = 1\r\n\r\n if DEBUG == 1:\r\n print \"newcolor:\", newcolor\r\n print \"im:\", im\r\n \r\n THRESHOLD = 225 # Doesn't rely on colour being exactly 255,255,255\r\n\r\n if GRAPHICSMODE == \"PIL\":\r\n im.convert(\"RGBA\")\r\n newimdata = []\r\n for color in im.getdata():\r\n if color[0] > THRESHOLD:\r\n if color[1] > THRESHOLD:\r\n if color[2] > THRESHOLD:\r\n if color[3] > THRESHOLD:\r\n newimdata.append(newcolor)\r\n else:\r\n newimdata.append(color)\r\n else:\r\n newimdata.append(color)\r\n else:\r\n newimdata.append(color)\r\n else:\r\n newimdata.append(color)\r\n #print\r\n newim = PilImage.new(\"RGBA\",im.size)\r\n newim.putdata(newimdata)\r\n\r\n elif GRAPHICSMODE == \"PyGame\":\r\n im.convert()\r\n if DEBUG == 1:\r\n print \"newcolor:\", newcolor\r\n newcolor = pygame.Color(newcolor[0], newcolor[1], newcolor[2], 255) # why the HELL does pygame reverse RBG -> GBR???\r\n\r\n if DEBUG == 1:\r\n print \"newcolor(converted):\", newcolor\r\n newim = pygame.Surface((im.get_width(),im.get_height()), pygame.SRCALPHA, 32) #change its background color \r\n\r\n tochange = pygame.Color(THRESHOLD, THRESHOLD, THRESHOLD,255) # or whatever yellow color you want\r\n\r\n thresh = (20, 20, 20, 20) # or whatever threshold works\r\n\r\n if DEBUG == 1:\r\n print \"newim:\",newim \r\n print \"type(newim):\",type(newim)\r\n print \"im:\",im\r\n print \"type(im):\",type(im)\r\n print \"tochange:\",tochange\r\n print \"thresh:\",thresh\r\n print \"newcolor:\",newcolor\r\n\r\n pygame.Surface.lock(im)\r\n\r\n for imx in range(0, im.get_width()):\r\n for imy in range(0, im.get_height()):\r\n color = im.get_at((imx,imy))\r\n if color[0] > THRESHOLD:\r\n if color[1] > THRESHOLD:\r\n if color[2] > THRESHOLD:\r\n if color[3] > THRESHOLD:\r\n newim.set_at((imx,imy), newcolor)\r\n else:\r\n newim.set_at((imx,imy), color)\r\n else:\r\n newim.set_at((imx,imy), color)\r\n else:\r\n newim.set_at((imx,imy), color)\r\n else:\r\n newim.set_at((imx,imy), color)\r\n\r\n if DEBUG == 1:\r\n if os.path.isfile(\"E:\\\\Projects\\\\NEW RENPY GAME\\\\TEMP\\\\TESTFILE_OLD.png\"):\r\n os.remove(\"E:\\\\Projects\\\\NEW RENPY GAME\\\\TEMP\\\\TESTFILE_OLD.png\")\r\n if os.path.isfile(\"E:\\\\Projects\\\\NEW RENPY GAME\\\\TEMP\\\\TESTFILE.png\"):\r\n os.rename(\"E:\\\\Projects\\\\NEW RENPY GAME\\\\TEMP\\\\TESTFILE.png\", \"E:\\\\Projects\\\\NEW RENPY GAME\\\\TEMP\\\\TESTFILE_OLD.png\")\r\n pygame.image.save(newim, \"E:\\\\Projects\\\\NEW RENPY GAME\\\\TEMP\\\\TESTFILE.png\")\r\n print \"wrote file 'E:\\\\Projects\\\\NEW RENPY GAME\\\\TEMP\\\\TESTFILE.png'\"\r\n print\r\n\r\n pygame.Surface.unlock(im)\r\n\r\n return newim", "def fireworks():\n\n sleep_speed = 0.025\n\n # Turn on white\n PYGLOW.color(\"white\", 60)\n sleep(sleep_speed)\n # Turn on blue\n PYGLOW.color(\"blue\", 60)\n sleep(sleep_speed)\n # Fade white\n PYGLOW.color(\"white\", 50)\n sleep(sleep_speed)\n # Turn on green\n PYGLOW.color(\"green\", 60)\n sleep(sleep_speed)\n # Fade white and blue\n PYGLOW.color(\"white\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 50)\n sleep(sleep_speed)\n # Turn on yellow\n PYGLOW.color(\"yellow\", 60)\n sleep(sleep_speed)\n # Fade white, blue, and green\n PYGLOW.color(\"white\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 50)\n sleep(sleep_speed)\n # Turn on orange\n PYGLOW.color(\"orange\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, and yellow\n PYGLOW.color(\"white\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 30)\n sleep(sleep_speed)\n 
PYGLOW.color(\"green\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 50)\n sleep(sleep_speed)\n # Turn on red\n PYGLOW.color(\"red\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, yellow, and orange\n PYGLOW.color(\"white\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 50)\n sleep(sleep_speed)\n # Fade all\n PYGLOW.color(\"white\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 50)\n sleep(sleep_speed)\n # Fade blue, green, yellow, orange, and red\n PYGLOW.color(\"blue\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 40)\n sleep(sleep_speed)\n # Fade green, yellow, orange, and red\n PYGLOW.color(\"green\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 30)\n sleep(sleep_speed)\n # Fade yellow, orange, and red\n PYGLOW.color(\"yellow\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 20)\n sleep(sleep_speed)\n # Fade orange, and red\n PYGLOW.color(\"orange\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 10)\n sleep(sleep_speed)\n # Fade red\n PYGLOW.color(\"red\", 0)\n sleep(sleep_speed)\n # Pause 1 second before the next one\n sleep(1)", "def set_game_state(self,winner):\r\n if winner == 'b':\r\n self._game_state = \"BLACK_WON\"\r\n else:\r\n self._game_state = \"RED_WON\"", "def __init__(self, player1, player2):\n # # players of the game {player1name: {color: , red_marbles:}}\n # self._players = {player1[0]: {\"name\": player1[0], \"color\": player1[1]},\n # player2[0]: {\"name\": player2[0], \"color\": player2[1]}}\n # # empty board, no marbles yet\n # self._board = self.create_board()\n # # current player's turn\n # self._turn = None\n # # winner state\n # self._winner = None\n # # red marbles captured for each player, needs addition of black and white marbles\n # self._captured = {player1[0]: 0, player2[0]: 0}\n pass", "def __remove_player(self, color):\n self.state.remove_player(color)\n self.violators.append(self.players[color])", "def __init__(self, name, full_name, team, eye_color, hair_color, base):\n\n self.name = name\n self.full_name = full_name\n self.team = team\n self.eye_color = eye_color\n self.hair_color = hair_color\n self.base = base\n\n self.powers = []\n self.nemeses = []", "def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value" ]
[ "0.6794499", "0.6771435", "0.6575915", "0.65424424", "0.6528288", "0.640866", "0.62868875", "0.6146448", "0.6046117", "0.6043046", "0.60069966", "0.59718126", "0.5966473", "0.5936794", "0.5885561", "0.58830494", "0.58271277", "0.57827985", "0.57747686", "0.57669955", "0.5732664", "0.5705307", "0.57047546", "0.5702987", "0.569082", "0.5687466", "0.56769234", "0.56634164", "0.56587493", "0.56587493", "0.56587493", "0.56587493", "0.56587493", "0.56587493", "0.56587493", "0.56575894", "0.5652206", "0.5644664", "0.56355214", "0.56350106", "0.5619468", "0.55925447", "0.5586012", "0.5547351", "0.55433416", "0.55333227", "0.5531636", "0.5525445", "0.55254424", "0.5524971", "0.55211776", "0.55186504", "0.5493109", "0.5488165", "0.5487603", "0.54807353", "0.5478099", "0.54641473", "0.54591805", "0.5457725", "0.54470944", "0.5446355", "0.5441905", "0.5435735", "0.54266214", "0.5425362", "0.5416227", "0.54159105", "0.5409191", "0.54006773", "0.5379633", "0.5373274", "0.53635097", "0.53635097", "0.53635097", "0.53564614", "0.5348443", "0.53449607", "0.5334846", "0.53267103", "0.5322761", "0.53137285", "0.53114015", "0.53114015", "0.53046113", "0.52817154", "0.52813494", "0.52705437", "0.5269672", "0.5267487", "0.5267487", "0.5252664", "0.5251035", "0.5244627", "0.523458", "0.5227709", "0.52267295", "0.5221672", "0.5219131", "0.5214375" ]
0.65751874
3
Very important. Lets the UI know to give human player control.
def isHuman(self): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __handle_view_player(self, gamestate_component):", "def game_play(self):", "def auto_play(self):\n raise NotImplementedError(self)", "def newPlayer():\r\n pass", "def take_control(self):\n pass", "def show_playing(self):\n\n print(\"show_playing needs implementation\")", "def play(self):\n pass", "def pass_player(self):\n # pass control to next player by asking game who that is\n self.disable_all_buttons()\n self.game.pass_control_next(self)", "def change_player_state(self):\n if self.active_player.get() is True:\n # Get game phase and unlock respective buttons?\n # or should game do that\n pass\n else:\n pass\n #self.disable_all_buttons()", "def __init__(self, player):\n self.player = player", "def control_plugin(self):\n pass", "def spinupplayer():\n if __name__ == '__main__':\n _playthread = ImmediatePlayer(PLAYER_SETTINGS, COLOR_SETTINGS)\n PROCESSES.append(_playthread)\n _playthread.start()", "def player_moves_player(self, x, y):\n activePlayer = self.get_active_player()\n if activePlayer.humanControlled:\n super(RobotGame, self).player_moves_player(x, y)", "def playerForfeit(self):\n self.handleWin(self.currentplayer*-1)", "def __init__(self, player):\n\t\tself.player = player", "def human_player(human):\n def player():\n if human.wait_for_teleop():\n yield \"wait for Teleop\"\n\n # TODO: Human player behaviors...\n # The STATION Human ought to move Cubes from the Exchange into\n # Vault columns. The others ought to push Cubes through Portals.\n while True:\n yield \"done\"\n\n human.set_player(player())", "def get_player(self):\r\n return self.player_control.get_player()", "def player_movement(self):", "def play(self, player, game): \n super().play(player, game)\n game.set_action(\"SLEEP_CODER\")", "def play(self, state,currentplayer):\n\t\tpass", "def __init__(self, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)", "def update(self):\r\n if not self.tr.game_over and self.tr.turn_tracker:\r\n self.computer_play()", "def _control_pause(self):\n self.player.pause()", "def test_get_player(self):\n pass", "def play(self, player, game):\n super().play(player, game)\n game.set_action(\"PICKUP_CODER\")", "def wait_to_play(self):\n\n\t\tself.player_model.current_player = self.player_model.rival_player\n\t\tself.player_frame.prepare_to_wait_turn(self.player_model.rival_player.name, self.player_model.available_cells)", "def play(self):\n print(\"Bientôt ! 
:)\")", "def play(self):\n print('Playing game...')", "def _control_play(self, entities: List[str]):\n if entities:\n self.player.play(entities)\n else:\n self.player.respond(\"I'm sorry, I couldn't find that for you.\")", "def play_game():\n pass", "def testPlayback(self):\n \n pass", "def setup_game(self):", "def display(self):\n print(\"----Player----\")\n print(\"Player {} is using {} as their mark\".format(self.name, self.mark))", "async def set_player(self, player: Optional[andesite.Player]) -> None:\n ...", "def playerCanPlay(game, situation, player):\r\n return True", "def __init__(self):\n\t\tself.playercolider()", "def utility(self, state, player):\r\n raise NotImplementedError", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def player(self):\n return self.current.player", "def player_take(self):\n taken = self.take()\n if taken:\n self.handler.message_box.add_msg(\"You take the {}!\".format(taken),\n data.COLOURS['player_item_text'])", "def set_player_state(self, player):\n\n health_str = \"HP {0}/{1}\".format(int(player.health), int(player.max_health))\n self.health_label.element.text = health_str\n\n if player.armor is not None:\n armor_hp = int(player.armor.health)\n max_armor_hp = int(player.armor.max_health)\n armor_str = \"ARMOR {0}/{1}\".format(armor_hp, max_armor_hp)\n self.armor_label.element.text = armor_str", "def _handleInput(self):\n\n Game.Player.running(Game.ControlState[Game.MoveRight], not (Game.ControlState[Game.MoveRight] == Game.ControlState[Game.MoveLeft]))\n Game.Player.jumping(Game.ControlState[Game.Jump])\n Game.Player.flying(Game.ControlState[Game.Fly])\n Game.Player.firing(Game.ControlState[Game.Fire])", "def player(network, event) :\n\twhile event.is_set() :\n\t\t_, _, (V, P) = network.act()\n\t\t# print V, P\n\t\tnetwork.env.render()\n\t\tif (network.env.done) :\n\t\t\tnetwork.reset_game()\n\t\ttime.sleep(0.1)", "def harvest(self, player):\n return", "def start_of_game(self):\n pass", "def update(self):\n # If the game is not over.\n if self.tictactoe:\n # If the player is human, the choice can only be detected by the game,\n # because the player has no responsibility over the window\n if self.player.chooser:\n choice = self.choice\n self.choice = None\n else:\n choice = self.player.play(self.tictactoe)\n self.tictactoe.choose(choice)\n else:\n if self.on:\n self.end_time = time.time()\n self.on = False\n if self.restart_at_end:\n if time.time() - self.end_time > self.end_duration:\n self.restart()", "def play(self):\n\n #Call the superclass play\n return super().play()", "def player(self):\n # type: () -> string_types\n return self._player", "def HandButton(self, event):\n pass", "def visit_player(self, player):\n self.visit_character(player)", "def play(self):\n self.player = Knight()\n self._occupy_huts()\n acquired_hut_counter = 0\n\n self.show_game_mission()\n self.player.show_health(bold=True)\n\n while acquired_hut_counter < 5:\n idx = self._process_user_choice()\n self.player.acquire_hut(self.huts[idx-1])\n\n if self.player.health_meter <= 0:\n print_bold(\"YOU LOSE :( Better luck next time\")\n break\n\n if self.huts[idx-1].is_acquired:\n acquired_hut_counter += 1\n\n if acquired_hut_counter == 5:\n print_bold(\"Congratulations! 
YOU WIN!!!\")", "def play(self):\n self.strategy(self)", "def player(self):\n legal = self.board.legal_move(self.black)\n if(len(legal) == 0):\n self.p_no_move = 1\n print(\"No legal move for player!\")\n self.computer_turn = True\n self.player_turn = False", "def onShow(self):\n pass", "def play_auto(self):\n from .players import get_player\n\n while not self.is_game_over:\n next = self.next_player\n player = self.player_x if next == 'X' else self.player_o\n if player == 'human':\n return\n\n player_obj = get_player(player)\n self.play(player_obj.play(self))", "def switch_player(self):\n self.player = Nim.other_player(self.player)", "async def game(self):\n pass", "def behaviors_paused(self) -> bool:", "def get_player(self):\n return self.player", "def get_player(self):\n return self.player", "def player(self):\n return self._player", "def switch_to_controls_screen(self, player):\n\t\tcontrols = SelectControls(player) #it's possible these should be ControlsControls but I'm not sure\n\t\tcontrol_manager = ControlManager(controls)\n\t\tcontrols_screen = ControlsScreen(control_manager, player)\n\t\tself.set_current_screen(controls_screen)", "def UsePresentation(self, event):\n pass", "def on_pause(self):\n pass", "def select_player(n):\n pygame.display.set_caption(\"You selected: \" + PROF[n])", "def user_control(board, x_player, y_player, button_pressed, inventory):\n\n red = '\\033[31m'\n reset_color = '\\033[0m'\n item_colors = {\n '●': '\\033[33m', '⚛': '\\033[34m', '✿': '\\033[31m', '✡': '\\033[94m',\n '♦': '\\033[32m', 'ᴥ': '\\033[31m', '☀': '\\033[33m'}\n place_on_right_side = board[y_player][x_player + 1]\n place_on_left_side = board[y_player][x_player - 1]\n place_on_up_side = board[y_player - 1][x_player]\n place_on_down_side = board[y_player + 1][x_player]\n places_prohibited_to_stand_on = [\n 'X', red + '#' + reset_color, '☹', '☃', '♞', '☻', '☬', item_colors['☀'] + '☀' + reset_color, red\n + '☀' + reset_color]\n\n if button_pressed == 'd' and place_on_right_side not in places_prohibited_to_stand_on:\n x_player += 1\n elif button_pressed == 'a' and place_on_left_side not in places_prohibited_to_stand_on:\n x_player -= 1\n elif button_pressed == 'w' and place_on_up_side not in places_prohibited_to_stand_on:\n y_player -= 1\n elif button_pressed == 's' and place_on_down_side not in places_prohibited_to_stand_on:\n y_player += 1\n\n friends = ['☹', '☃', '♞', '☻', '☬']\n # conditions for level 4 (feeding friends)\n if button_pressed == 'd' and place_on_right_side in friends and inventory['●'] > 19:\n x_player += 1\n elif button_pressed == 'a' and place_on_left_side in friends and inventory['●'] > 19:\n x_player -= 1\n elif button_pressed == 'w' and place_on_up_side in friends and inventory['●'] > 19:\n y_player -= 1\n elif button_pressed == 's' and place_on_down_side in friends and inventory['●'] > 19:\n y_player += 1\n return x_player, y_player", "def is_playfield(cls):\n return True", "def play(self, player, game):\n super().play(player, game)\n game.set_action(\"STEAL_CODER\")", "def selectPlayerMenu(self, fpsclock, screen):\n screen.fill((0, 0, 0))\n self.draw_text(screen, \"Press h to play\", 40, 400, 150, 255, 255, 255, True)\n self.draw_text(screen, \"Press a to train the AI\", 40, 400, 300, 255, 255, 255, True)\n pygame.display.flip()\n while True:\n dt = fpsclock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_h:\n self.reset()\n return \"human\"\n if event.key == 
pygame.K_a:\n return \"AI\"", "def switchPlayer(self):\n self.player = Nim.otherPlayer(self.player)", "def display_player_points():\r\n pass", "def play(self):\n\n\t\tself.player_model.current_player = self.player_model.player\n\t\tself.player_frame.prepare_to_play(self.player_model.available_cells)", "def before_turn(self, playerInfo):\n self.PlayerInfo = playerInfo", "def player_clicked(self, player):\n self.chosen_player = player\n self.setEnabled(False)\n for (_, _, button) in self.buttons:\n button.setEnabled(False)\n dialog = OpenByPlanetName(self, player)\n dialog.exec()\n self.setEnabled(True)\n for (_, _, button) in self.buttons:\n button.setEnabled(True)", "def controls_setup(self):\n pass", "def on_show(self):\n arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)\n\n # Reset the viewport, necessary if we have a scrolling game and we need\n # to reset the viewport back to the start so we can see what we draw.\n arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)\n\n \n \n\n #self.player_music_intro.EOS_LOOP = 'loop'\n self.player_music_intro = arcade.play_sound(self.music_intro)\n \n\n print(\"type(self.player_music_intro) : \", type(self.player_music_intro))", "def play(self):\r\n self.perform_strategy()", "def play_game():\n pass", "def banshee_playing(self):\n try:\n self.banshee = self.bus.get_object(\"org.bansheeproject.Banshee\",\"/org/bansheeproject/Banshee/PlayerEngine\")\n status = self.banshee.GetCurrentState()\n \n if status == \"playing\":\n currentTrack = self.banshee.GetCurrentTrack()\n self.set_artist_song_entry(currentTrack['artist'], currentTrack['name'])\n artist = self.clean_user_input(currentTrack['artist'])\n song = self.clean_user_input(currentTrack['name'])\n self.get_lyrics(artist, song)\n else:\n self.display_message(\"No media player playing!\")\n except:\n self.display_message(\"Something wrong with D-Bus\")", "def is_playing(self):\n raise NotImplementedError", "def run(self):\n super(MovementControl,self).run()", "def __init__(self):\n \n super(Player, self).__init__(image = Player.image,\n x = games.screen.width/2,\n bottom = games.screen.height)\n self.time_til_drop = 0\n \n self.score = games.Text(value = 200, size = 25, color = color.white,\n top = 5, right = games.screen.width - 10)\n games.screen.add(self.score)\n \n player_health_label = games.Text(value = 'Player Health:', size = 25, color = color.white,\n top = 5, right = games.screen.width - 45) \n games.screen.add(player_health_label)", "def __init__(self, player_name, word, hangman):\n super(Window, self).__init__()\n uic.loadUi('window.ui', self)\n self.setWindowTitle(i18n.OUT_MSG_TITLE)\n self.player_name = player_name\n self.greeterboard.hangman = hangman\n self.scoreboard.hangman = hangman\n self.word_view.word = word\n self.play_again = True", "def Peacekeaper(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def OnPlay(self):\r\n # check if there is a file to play, otherwise open a\r\n # Tk.FileDialog to select a file\r\n print(\"1-1\")\r\n\r\n\r\n self.Media = self.Instance.media_new(self.youtube_url)\r\n self.player.set_media(self.Media)\r\n\r\n # set the window id where to render VLC's video output\r\n if platform.system() == 'Windows':\r\n print(\"1-3\")\r\n self.player.set_hwnd(self.GetHandle())\r\n else:\r\n print(\"1-4\")\r\n self.player.set_xwindow(self.GetHandle()) # this line messes up windows\r\n # FIXME: this should be made cross-platform\r\n\r\n # Try to launch the media, if this fails display an error message\r\n if self.player.play() 
== -1:\r\n print(\"1-6\")\r\n self.errorDialog(\"Unable to play.\")", "def player(hass, request):\n return request.param(hass)", "def setUp(self):\n self.player = Player()", "async def handle_player_update(self, update: andesite.PlayerUpdate) -> None:\n ...", "def on_worker_started(self):\n self.playing = True\n self.enable_video_buttons(False, True, True)", "def play_human_custom(env):\n play_human(env)", "def display_player(self, pick, win):\n if pick == 'none':\n return False\n if pick == 'paper':\n player = pygame.image.load('paper.png')\n elif pick == 'scissor':\n player = pygame.image.load('scissor.png')\n else:\n player = pygame.image.load('rock.png')\n player = pygame.transform.scale(player, (100, 100))\n\n win.blit(player, (screen_width // 6, screen_height // 3))", "def computer_play(self):\r\n # Depending on game flow, helped randomize when smack showed up\r\n # This is more of an Easter Egg than anything.\r\n if (self.tr.disks_on_board != 0 and (self.tr.disks_on_board % 6 == 0 or\r\n self.tr.disks_on_board % 6 == 3) and self.tr.turn_tracker):\r\n self.ai.talk_smack()\r\n # Computer identifies possible moves to analyze\r\n for item in self.tr.computer_moves:\r\n self.ai.coordinate_extractor(item)\r\n # Computer chooses move\r\n choice = self.ai.choose_move()\r\n # Makes play\r\n choice = self.tr.bd.disks[choice[0]][choice[1]]\r\n self.ai.moves_reset()\r\n choice.color, choice.display_on = 1, True\r\n choice.chain()\r\n # Checks for player move, if none, checks for another move\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n # If none, ends game\r\n else:\r\n if not self.tr.game_over:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def __init__(self) -> None:\n self.is_human = True", "def on_key_press(self, key, modifiers):\n if self.player_enabled:\n super().on_key_press(key, modifiers)", "def do_nothing(self, player):\n return '%s spins \\'nun\\' and does nothing.' % (player,)", "def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)", "def on_main_action(self, e):\n if self.app.roster.IsShown():\n wx.PostEvent(self.app.roster, ev.HideRoster())\n else:\n wx.PostEvent(self.app.roster, ev.ShowRoster())", "def current_user_playing(self):\n return self.user_playing(users.GetCurrentUser())", "def __handle_view_win_condition(self, gamestate_component):" ]
[ "0.69776434", "0.67034006", "0.6657744", "0.655654", "0.6553702", "0.6436399", "0.64160234", "0.6278436", "0.6235379", "0.6231291", "0.6205271", "0.6144004", "0.61371064", "0.6132646", "0.6123176", "0.60980403", "0.60681164", "0.6064519", "0.60623634", "0.60614514", "0.60341895", "0.60140866", "0.6007204", "0.60012525", "0.59965414", "0.5990435", "0.59892607", "0.59806794", "0.59673595", "0.5966304", "0.5942731", "0.5924463", "0.5915919", "0.5899447", "0.5892335", "0.5883403", "0.5880907", "0.58589906", "0.58589906", "0.5835535", "0.5830786", "0.58277774", "0.581035", "0.57900894", "0.5777083", "0.5765247", "0.57593715", "0.5747211", "0.574321", "0.57392466", "0.5734531", "0.5733494", "0.57274467", "0.57253546", "0.57246816", "0.57190657", "0.5702099", "0.5698808", "0.56954736", "0.5683222", "0.5683222", "0.5681738", "0.56742555", "0.567123", "0.567035", "0.5668007", "0.56586456", "0.5655441", "0.565012", "0.56435025", "0.5633123", "0.5629379", "0.56259114", "0.5625496", "0.5623902", "0.5619111", "0.5611838", "0.56117046", "0.560914", "0.56025314", "0.56017566", "0.559707", "0.5596308", "0.5578385", "0.5577681", "0.55769724", "0.5573975", "0.55736655", "0.557358", "0.55628616", "0.5557766", "0.55557203", "0.55535245", "0.55521613", "0.55501604", "0.554752", "0.55412096", "0.5530487", "0.552805", "0.5518345", "0.55153376" ]
0.0
-1
String representation for a random player. Used for writing results filenames.
def __str__(self): return "{}_human".format(self.index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n \n is_random_print = \"\"\n if self.is_random == True:\n is_random_print = \"randomly\"\n else:\n is_random_print = \"deterministically\"\n\n return \"Player for \" + self.side + \", ply = \" + str(self.ply) + \", breaks ties \" + is_random_print", "def __repr__(self):\n if self.type == Player.HUMAN:\n return(\"Human\")\n elif self.type == Player.RANDOM:\n return (\"Random\")\n elif self.type == Player.MINIMAX:\n return (\"Minimax\")\n elif self.type == Player.ABPRUNE:\n return (\"ab Pruning\")\n elif self.type == Player.CUSTOM:\n return \"Q-Learner\"\n elif self.type == Player.MIX:\n return \"MIX\"", "def __repr__(self):\n return str(self._player) + str(self._num)", "def __str__(self):\n return \"Player: {}\".format(self.name)", "def to_string(self):\n return \"Moving randomly\"", "def __str__(self):\n return \"player: \" + str(self.player) + \"\\nposition: \" + str(self.position) + \"\\naccepted: \" + str(self.accepted) + \"\\ndirections enclosing: \" + str(self.directions_enclosing) + \"\\nfinal: \" + str(self.final)", "def __repr__(self):\n return 'Player({!r}, {!r}, {!r})'.format(\n self.name,\n self.hand,\n self.score\n )", "def getPlayerFormat(self):\r\n return self.player + \"\\t\"", "def __str__(self) -> str:\n return '{}'.format(self.letter if self.player == 0 else self.player)", "def __repr__(self):\n return '<Player %s: %s>' % (self.username, repr(self.stats()))", "def __repr__(self):\n s = \"Player for \" + self.ox + \"\\n\"\n s += \" with tiebreak type: \" + self.tbt + \"\\n\"\n s += \" and ply == \" + str(self.ply) + \"\\n\\n\"\n return s", "def __repr__(self):\n return f\"Player({self.hand}, {self.total}, {self.aceCount}, {self.dealer})\"", "def __repr__(self):\n s = 'Player ' + self.checker\n\n return s", "def _player_info(self):\n return \"%r %s seat:%s m:%r c:%s b:%s \" % (self.name, self.serial, self.seat, self.money, self._chips, self._bet)", "def __str__(self):\n return self.playername", "def getPlayerFilename(self):\n if (self.__playerName != \"???\"):\n return self.__filename\n else:\n return \"\"", "def __str__(self):\n res = \"Opponent: \" + str(self.opponent) + \" Angle: \" + str(self.angle)\n return res", "def __str__(self):\n return f\"This player has {self.hand} for a current total of {self.total} and {self.aceCount} Aces \" \\\n f\"valued at a soft 11. 
This player is a dealer: {self.dealer}.\"", "def __repr__(self):\n return \"Player('{}', {})\".format(self.name, self.tendency.__repr__())", "def __repr__(self):\n name = self.name\n prefix = \": \"\n gen = (\n value for value in self.init_kwargs.values() if value is not None\n )\n for value in gen:\n try:\n if issubclass(value, Player):\n value = value.name\n except TypeError:\n pass\n name = \"\".join([name, prefix, str(value)])\n prefix = \", \"\n return name", "def __repr__(self):\n out = ''\n out += f'\\nPlayer {self.number}: {self.name}\\n'\n\n # checks for trail options before printing.\n if len(self.trail_options) > 0:\n out += f'\\nTrail Options:\\n'\n for item in self.trail_options:\n out += f' {item}'\n else:\n out += f'\\nSadly, {self.name} is out of trail options.\\n'\n\n # checks for supplies before printing.\n if len(self.supplies) > 0:\n out += f'\\nSupplies:\\n'\n for item in self.supplies:\n out += f' {item[0]}\\n'\n else:\n out += f'\\nSadly, {self.name} is out of supplies.\\n'\n\n return out", "def __repr__(self):\n\n return f\"Player(name={self.name}, score={self.total_points})\"", "def announce_player(self) -> str:\n return f\"Player Name: {self.name}, Team name: {self.team}, Number: {str(self.number)}\"", "def __repr__(self):\r\n s = 'Player ' + self.checker + ' (' + self.tiebreak + ', ' + str(self.lookahead) + ')'\r\n return s", "def __str__(self):\n result = \", \".join(map(str, self.hand))\n result += \"\\n \" + str(self.get_score()) + \" points\"\n return result", "def __repr__(self):\r\n c = \"Player \" + self.checker + \" (\" + self.tiebreak + \", \" + str(self.lookahead) + \")\"\r\n return c", "def stringify(self):\n string = self.chars[\"type\"] + \" \"\n \n # current hearts\n for _ in range(self.hearts):\n string += self.chars[\"heart\"]\n\n # dead hearts\n for _ in range(3 - self.hearts):\n string += self.chars[\"noheart\"]\n\n return string", "def nice_output(self):\n return 'Pitch: {0} at {1}: {2}'.format(\n self.pitch_type, self.start_speed, self.des)", "def generate_results_string(player_list, singular_result, plural_result):\n string = \"\"\n plural = len(player_list) > 1\n player_number = 1\n if len(player_list) != 0:\n string += \"Player \"\n for player in player_list:\n string += player.get_name()\n if player_number < len(player_list) - 1:\n string += \", \"\n elif player_number < len(player_list):\n string += \" & \"\n player_number += 1\n if plural:\n string = string[:6] + \"s\" + string[6:] + plural_result\n else:\n string += singular_result\n return string", "def __str__(self):\n if self.showOneCard:\n return str(self.cards[0])\n else:\n return Player.__str__(self)", "def __str__(self):\n return (\"UUID: \" + str(self.uuid) + \"\\n\"\n \"Data: \" + str(self.data) + \"\\n\" +\n \"Tex: \" + str(self.texOutput) + \"\\n\")", "def getDescription(self):\n return \"GGP Players (*.player)\"", "def __str__(self):\n return \"UID {0}, Key {1}, Cipher {2}, PRNG {3}\".format(hex(self.uid), \n hex(self.key), hex(self.cipher), hex(self.prng))", "def __repr__(self):\r\n s = 'Player ' + str(self.checker)\r\n v = ' ('+ self.tiebreak+', '+str(self.lookahead)+')'\r\n s += v\r\n return s", "def __repr__(self):\n stringrepr = self.__class__.__name__ + \" PRNG. 
seed: \" + \\\n str(self.baseseed) + \" counter: \" + str(self.counter) + \\\n \" randbits_remaining: \" + str(self.randbits_remaining)\n return stringrepr", "def str_players_with_hand(self):\n message = \"Players and their hands\\n\\n\" + self.bold_message(self.dealer.str_with_hand()) + \"\\n\"\n for player in self.players:\n if isinstance(player, user.User):\n message += player.str_with_hand() + \"\\n\"\n return message", "def __str__(self):\n return \"{} : {}\".format(self._team_name, self._win_ratio)", "def __str__(self):\n return Hand.__str__(self) + '\\nHand Rank: ' + self.get_full_label()", "def to_json(self):\n player = {\n 'name': self.name,\n 'colour': self.colour,\n 'gender': self.gender,\n 'uid': self.UID,\n 'position': self.position,\n 'money': self.money,\n 'ready': self.is_ready(),\n }\n return player", "def __str__(self):\n return f'Character name: {self.name}\\nhealth: {self.health}\\n' \\\n f'strength: {self.strength}\\nchance dodge: ' \\\n f'{round(self.chance_dodge, 2)}\\nchance critical:' \\\n f' {round(self.chance_critical, 2)} '", "def to_string(self):\n\n return '[[%s], [%s]], [%d, %d], [%s], %s, %s, [%s]' % \\\n (', '.join(INT2STRING_CARD[h] for h in self.hand[0]),\n ', '.join(INT2STRING_CARD[h] for h in self.hand[1]),\n self.pot[0], self.pot[1],\n ', '.join(INT2STRING_CARD[p] for p in self.pub),\n INT2STRING_PHASE[self.phase],\n INT2STRING_PLAYER[self.player],\n ', '.join(INT2STRING_STATUS[s] for s in self.status))", "def name(self) -> str:\n try:\n return self.stats[\"Player name\"]\n except KeyError as ke:\n logger.debug(ke, exc_info=True)\n logger.warn(\"unable to get player name\")\n return \"\"", "def __str__(self):\n struct_repr = \", \".join([\n \"w: \" + str(self.w),\n \"x: \" + str(self.x),\n \"y: \" + str(self.y),\n \"z: \" + str(self.z)\n ])\n\n return f\"Quaternion: [{struct_repr}]\"", "def __str__(self):\n if self._rank is None:\n rank_str = \"\"\n else:\n rank_str = str(self._rank + 1)\n\n if self._file is None:\n file_str = \"\"\n else:\n file_str = chr(self._file + 97)\n\n return file_str + rank_str", "def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name", "def __str__(self):\n string = \"Hand contains \"\n h = self.hand\n \n for i in range(len(h)):\n string += str(h[i].get_suit()) + str(h[i].get_rank()) + \" \"\n \n return string", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def __str__(self):\n return str(self.rank)+str(self.suit)", "def __str__(self):\n\n if self.sampling is not None:\n strme = \"move volume {} {} {}\"\\\n .format(self.key, self.sampling, self.pfreq)\n else:\n strme = \"move volume {} {}\".format(self.key, self.pfreq)\n\n return strme", "def player_id(self):\n \n return 'audio-player-%s' % hash((time.time(),))", "def printPlayerStats(self):\n\t\tplayerStats = ['Name = ' + self.name, \n\t\t\t\t\t 'Agility = ' + str(self.agility), \n\t\t\t\t\t 'Personality = ' + str(self.personality), \n\t\t\t\t\t 'Sanity = ' + str(self.sanity), \n\t\t\t\t\t 'Strength = ' + str(self.strength), \n\t\t\t\t\t 'Progress = ' + str(self.progress)]\n\t\tprint playerStats", "def generate_message(self):\n\t\tmsg = \"\"\n\t\tfor idx, player in enumerate(self.players, start=1):\n\t\t\tmsg += f\"Player {idx} - {player.display_name}\\n\"\n\t\tmsg += (\n\t\t\tf\"\\nClick the `Join Game` button to join. Up to {self.max_players} players can join. 
\"\n\t\t\t\"To start with less than that many, use the `Start Game` button to begin.\"\n\t\t)\n\t\treturn msg", "def __str__(self):\n return \"{} of {}\".format(self.rank,self.suit)", "def __repr__(self):\r\n return \"Username: \" + str(self.username) + \\\r\n \", Video count in metadata: \" + str(self.video_count) + \\\r\n \", Videos IDs found: \" + str(len(self.videos))", "def __str__(self):\n return \"{}_random\".format(self.index)", "def __repr__(self) -> str:\n return f'{self.name}|{self.hp}|{self.mp}'", "def __repr__(self) -> Any:\n game_board = self.__str__() + \"\\n\"\n current_player_info = \"Is p1 the current player? \" + str(self.p1_turn)\n result = game_board + current_player_info\n return result", "def __repr__(self):\n repr = \"<BBPlayer %s at %s>\" % (self.name, hex(id(self)))\n return repr", "def player(self):\n # type: () -> string_types\n return self._player", "def __str__(self):\n return str(self.rank) + \" of \" + self.suit", "def __str__(self):\n string = 'input dim: {} \\noutput dim: {} \\n'.format(\n self.dim_inputs, self.dim_outputs\n )\n string += 'sequence length: {} \\n'.format(\n self.tensors[0].shape[1]\n )\n key = 'train' if self.train else 'test'\n string += '{}_samples: {} \\n{}_sequences: {} \\n'.format(\n key, self.experiment_length, key, self.tensors[0].shape[0]\n )\n return string", "def __str__(self):\n return '<TuebingenMEG: %i samples, %i timepoints, %i channels>' \\\n % (self.nsamples, self.ntimepoints, len(self.channelids))", "def single_temp() -> str:\n return '36.' + str(random.randint(1, 5))", "def getOpponentFormat(self):\r\n return self.opponent + \"\\t\"", "def __str__(self):\n return f\"{self.rank.title()} of {self.suit.title()}\"", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def random_teampreview(self, battle: AbstractBattle) -> str:\n members = list(range(1, len(battle.team) + 1))\n random.shuffle(members)\n return \"/team \" + \"\".join([str(c) for c in members])", "def gen_dynstring(self):\n # a timestamp plus something semi random\n return '%s.%s' % (time.strftime('%m%d%H%M%S', (time.localtime())),\n random.randint(1, 100000))", "def __str__(self):\n return ', '.join([self.yftf_data, self.info_hash, str(self.num_pieces), str(self.peers)])", "def _get_rand_str(self):\n rand_components = []\n for key, (min_val, max_val) in self.randomargs.items():\n val = random.uniform(min_val, max_val)\n if type(min_val) is int and type(max_val) is int:\n val = int(val)\n rand_components.append(f\"{key}={str(val)}\")\n return \",\".join(rand_components)", "def board_string(self, players):\n if len(self.user_guess) == 1:\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += \"-------------------\\n\"\n\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0].get_name()}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1].get_name()}: {self.user_guess[1]}, {self.applied_guess[1]}\\n\"\n board += \"-------------------\\n\"\n\n return board", "def __str__(self):\n prob = str(round(self.probability, 5))\n dprob = str(round(self.postdProbability, 5))\n output = \"dprob: \" + dprob + \" \\tprob: \" + prob + \"\\t: \"\n for key in self.attackDict.keys():\n output += key + \" \"\n return output", "def name(who):\r\n if who == 0:\r\n return 'Player 0'\r\n elif who == 
1:\r\n return 'Player 1'\r\n else:\r\n return 'An unknown player'", "def getName(self):\n\n return self.player", "def __str__(self):\n return '{0} of {1}'.format(Card.rank_names[self.rank], Card.suit_names[self.suit])", "def __str__(self):\n return '{0} of {1}'.format(Card.rank_names[self.rank], Card.suit_names[self.suit])", "def get_name(self):\n return self._player_name", "def get_current_player_name(self) -> str:\n if self.p1_turn:\n return 'p1'\n return 'p2'", "def to_string(self):\n return self.dungeon_string", "def __str__(self):\n return f\"{self.face} of {self.suit} with a value of {self.value}\"", "def __str__(self):\r\n return self.suit + self.rank", "def __str__(self):\n return(' Spot\\n'\n '\\tPosition:\\n'\n f'\\tX: {self.x}\\n'\n f'\\tY: {self.y}\\n'\n f'\\tZ: {self.z}\\n'\n '\\tRotation\\n'\n f'\\tX: {self.rotX}\\n'\n f'\\tY: {self.rotY}\\n'\n f'\\tZ: {self.rotZ}\\n'\n )", "def __str__(self):\n return \"Name: \" + self._name + \"\\nScores: \" + \\\n \" \".join(map(str, self._scores))", "def to_str(self) -> str:\n private = self.privacy == 1\n owners: List[str] = []\n for o in self.owners:\n user = self.bot.get_user(o)\n owners.append(user.name if user else \"Unknown user\")\n return f\"Wormhole: {self.name}\\n┗━▷ Private: {private} - Admins: {', '.join(owners)} - **{self.channels}** Discord channels are linked\"", "def __repr__(self):\n return \"\\nSprite info: \" + self.name + \"\\nx = {0}\\ny = {1}\\nhealth = {2}\\nstrength = {3}\\nloot = {4}\\n\"\\\n .format(self.x, self.y, self.health, self.strength, self.loot)", "def __str__(self):\n #Create dictionary for face cards\n translate = {11:'Jack', 12:'Queen', 13:'King', 14: 'Ace'}\n r = self._rank\n #check for face card\n if r in [11, 12, 13, 14]:\n myrank = translate[r]\n else:\n myrank = str(r)\n return myrank + \" of \" + self._suit", "def human_readable_info(self) -> str:\n next_session = unix_str(self._stat.next_session)\n last_session = unix_str(self._stat.last_session)\n return \"\"\"\n Next Session: {}\n Last Session: {}\n Repetitions: {}\n Health: {}\n ------------------------\n Past Quality (last 20):\n ------------------------\n {}\n \"\"\".format(\n next_session,\n last_session,\n self._stat.actual_repetitions,\n self._health(),\n self._past_quality_graph(),\n )", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def __str__(self):\n if self._active_player:\n def piece_to_index(piece):\n return (piece & 0xF)\n else:\n def piece_to_index(piece):\n return (piece & 0xE) | (0 if piece & 1 else 1)\n\n return '\\n'.join(map(\n lambda posY, row: ''.join(map(\n lambda posX, piece: self.EMOJI[\n piece_to_index(piece)\n if piece else\n 14 + ((posY + posX) % 2)],\n count(), row)),\n count(),\n self.board if self._active_player else reversed(\n [reversed(row) for row in self.board])))", "def __setPlayerFilename(self):\n if self.__playerName != \"???\":\n l=self.__playerName.rsplit(\" \")\n nameWithoutSpaces=\"_\".join(l)\n self.__filename = fileLocs.playerProfiles+\"\\\\\"+nameWithoutSpaces+r\".p\"", "def __repr__(self: object) -> str:\n measstring: str = \"Tatort - {:04d} - {} - {} - {} - {}\".format(self.episode_id, self.episode_name, self.episode_inspectors, self.episode_sequence, self.episode_broadcast)\n return measstring", "def to_string(self):\n\n if self.player == CHANCE: # is chance\n return ', '.join(INT2STRING_CARD[c] for c in self.deal)\n else: # is not chance\n return INT2STRING_ACTION[self.action]", "def __str__(self):\n out = 
\"{}.\".format(self.move_number)\n if self.white.san != \"\":\n out += \" \" + str(self.white)\n else:\n out += \"..\"\n if self.black.san != \"\":\n out += \" \" + str(self.black)\n if self.comment:\n out += \" {\" + self.comment + \"}\"\n return out", "def __str__(self): \n \n return self.suit + self.rank", "def __str__(self):\n return self._last_opponent", "def to_str(self) -> str:\n perms = (\n \"Write and Read\"\n if self.perms == \"wr\"\n else \"Read\"\n if self.perms == \"r\"\n else \"Write\"\n )\n return f\"Channel: <#{self.channelID}>\\n┗━▷ Linked to **{self.wh}** - Permissions: *{perms}*\"", "def __str__(self):\n string = \"Deck contains \"\n\n for i in range(len(self.deck)):\n string += str(self.deck[i].get_suit()) + str(self.deck[i].get_rank()) + \" \"\n return string", "def __str__(self):\n table_data = [\n ['', 'C', 'G', 'A', 'T'],\n ['total', str(self.total['C']), str(self.total['G']), str(self.total['A']), str(self.total['T'])],\n ['reverse half strand', str(self.reverse['C']), str(self.reverse['G']), str(self.reverse['A']),\n str(self.reverse['T'])],\n ['forward half strand', str(self.forward['C']), str(self.forward['G']), str(self.forward['A']),\n str(self.forward['T'])]\n ]\n table = AsciiTable(table_data)\n return \"Generation #{}\\n\".format(self.epoch) + table.table", "def print_player_info(self):\n\t\tclear_screen()\n\n\t\tprint(\"# PLAYER INFO #\\n\")\n\t\tprint(\"Name{:.>17} \".format(self.info['Name']))\n\t\tprint(\"Race{:.>17} \".format(self.info['Race']))\n\t\tprint(\"Level{:.>16} \".format(self.stats['Level']))\n\t\tprint(\"Hit Points{:.>11} \".format(self.stats['HP']))\n\t\tprint(\"Gold Pieces{:.>10} \".format(self.stats['GOLD']))\n\t\n\t\tpress_enter()", "def generate_producer_name():\n return movie_producer_surnames[random.randint(0, len(movie_producer_surnames) - 1)] + \" \" + movie_producer_surnames[random.randint(0, len(movie_producer_surnames) - 1)]", "def get_player_name(self):\n return self._player_name" ]
[ "0.7373667", "0.71827644", "0.70852", "0.69778293", "0.69040483", "0.68909234", "0.6811406", "0.6797799", "0.6739397", "0.673668", "0.6681964", "0.66097915", "0.6596855", "0.658164", "0.6565075", "0.64829636", "0.6482711", "0.64633006", "0.64110553", "0.6410214", "0.6378095", "0.63684297", "0.629823", "0.6289816", "0.612884", "0.6116562", "0.61127436", "0.6111311", "0.6102965", "0.609162", "0.6085754", "0.6072979", "0.60673195", "0.60605264", "0.6058458", "0.60522413", "0.6039757", "0.60174364", "0.60073346", "0.6006034", "0.60034835", "0.6000418", "0.5998494", "0.598956", "0.59887326", "0.59844667", "0.59764796", "0.5970785", "0.596426", "0.5954844", "0.595103", "0.5948352", "0.59414744", "0.5934815", "0.59313565", "0.59150225", "0.5913866", "0.590119", "0.5898231", "0.5889459", "0.5862595", "0.5860943", "0.5852926", "0.58513117", "0.5841902", "0.58329445", "0.5816344", "0.58145046", "0.5809985", "0.57859886", "0.57842934", "0.5781801", "0.57804793", "0.5779944", "0.5776383", "0.5776383", "0.57733095", "0.5768743", "0.57663465", "0.5765477", "0.5757846", "0.57522774", "0.5750185", "0.5737686", "0.5736952", "0.5731932", "0.57289654", "0.5727055", "0.57258224", "0.5724505", "0.5719306", "0.5718246", "0.5716911", "0.5708694", "0.570848", "0.570107", "0.5697704", "0.56835306", "0.568132", "0.5679843", "0.5678637" ]
0.0
-1
Chooses a move for the player by calling random move.
def chooseMove(self, game):
    return self.randomMove(game)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_random(self, board):\n self.get_moves(board.board)\n return random.choice(self.available_moves)", "def move(self):\r\n his_move = random.randint(0, 2)\r\n return the_moves[his_move]", "def computer_move():\n\tmove = random.choice(moves)\n\tprint \"Computer's move is %s\" % move\n\treturn move", "def random_player(board_state, _):\n moves = list(available_moves(board_state))\n return random.choice(moves)", "def random_legal_move():\n return random.choice(legal_moves())", "def get_random_move(self, valid_moves):\n return random.choice(valid_moves)", "def random_move(self):\n available_idx = self.get_empty_cells(self.game_board)\n return random.choice(available_idx)", "def randomMove(self, game):\n #time.sleep(0.25)\n return random.choice(game.get_all_legal_moves())", "def make_random_move(self):\n #completely random move\n all_moves = set(itertools.product(range(self.height), range(self.width)))\n moves_left = list(all_moves - self.mines - self.moves_made)\n if not moves_left:\n return None\n return random.choice(moves_left)", "def move(self):\n\n choices = []\n if self.game.green_apples > 0:\n choices.append(self.game.move_green)\n if self.game.red_apples > 0:\n choices.append(self.game.move_red)\n if self.game.blue_plums > 0:\n choices.append(self.game.move_blue)\n if self.game.yellow_pears > 0:\n choices.append(self.game.move_yellow)\n\n random_index = random.randint(0, len(choices) - 1)\n f = choices[random_index]\n f(True)", "def move(self):\r\n my_move = self.last_moves[\"my_move\"]\r\n return (my_move != \"\" and moves[(moves.index(my_move)+1) % 3] or\r\n random.choice(moves))", "def __get_next_random_move(self, game_state):\n return random.choice(self.__get_free_seats(game_state))", "def make_random_move(self):\n s=set()\n for i in range(self.height):\n for j in range(self.width):\n s.add((i,j))\n\n s=s-self.mines-self.moves_made\n if s==set(): return None\n return random.choice(list(s))\n #raise NotImplementedError", "def random_strategy(player, board):\n return random.choice(Othello.legal_moves(player, board))", "def move(self):\r\n their_move = self.last_moves[\"their_move\"]\r\n return (their_move == \"\" and random.choice(moves) or their_move)", "def make_random_move(self):\n #raise NotImplementedError\n # Take out moves_made as well as mines detected\n self.available_cells = self.available_cells - self.moves_made - self.mines\n available_cells = self.available_cells.copy()\n\n # I'll first try and see if there's any move not within the nearby of\n # The mines, I think this can maximise survivability in some cases\n # It'll still work even if didn't do the below\n for sentence in self.knowledge:\n available_cells -= sentence.cells\n #print(sentence)\n #print(self.mines)\n\n # Making a random move\n length = len(available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n\n length = len(self.available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(self.available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n return None", "def move(self):\n if self.learn is None:\n return random.choice(moves)\n else:\n return self.storedmove", "def take_move_player_turn(self, move_player_fxn):\n x, y = self.player.x, self.player.y\n tiles = self.board.get_landable_tiles_around(x, y)\n target = random.choice(tiles)\n move_player_fxn(target.x, target.y)", "def choose_move(self, board):\n if self.opp == 
Player.HUMAN:\n time.sleep(4)\n if self.type == Player.HUMAN:\n move = input(\"Please enter your move:\")\n while not board.legalMove(self, move):\n print(move, \"is not valid\")\n move = input(\"Please enter your move\")\n return move\n elif self.type == Player.RANDOM:\n move = choice(board.legalMoves(self))\n return move\n elif self.type == Player.MINIMAX:\n val, move = self.minimax_move(board, self.depth * 2,\n Player.MAX_PLAYER)\n board.last_move = move\n return move\n elif self.type == Player.ABPRUNE:\n val, move = self.alpha_beta_move(board, self.depth * 2,\n float('-inf'), float('inf'),\n Player.MAX_PLAYER)\n return move\n elif self.type == Player.CUSTOM:\n move = self.agent.getAction(board)\n self.agent.update_current_state(board, move)\n return move\n elif self.type == Player.MIX:\n return self.mixed_move(board)\n\n else:\n print(\"Unknown player type\")\n return -1", "def choose_move(self): # pylint: disable=too-many-branches,too-many-return-statements\n if self.current_mana < 10: # Only usable move\n return self.moves.teleport\n\n if self.game.player.current_hp <= 10 and self.current_mana >= self.moves.claw.mana_cost:\n return self.moves.claw\n if self.game.player.current_hp <= 20:\n return self.moves.glide\n if self.game.player.current_hp <= 30:\n if self.current_mana < 50:\n options = {self.moves.teleport: 3, self.moves.glide: 6}\n elif self.current_mana <= 140:\n options = {self.moves.teleport: 1, self.moves.glide: 2, self.moves.claw: 6}\n else:\n options = {self.moves.glide: 2.3333333333, self.moves.claw: 6.6666666667}\n if self.current_hp <= 180:\n options[self.moves.heal] = 1\n return self.random_weighted(options)\n\n if self.current_hp < 25:\n if self.current_mana < 50:\n return self.random_weighted({self.moves.teleport: 0.1, self.moves.glide: 0.1, self.moves.heal: 0.8})\n if self.game.player.current_hp <= 40:\n return random.choice([self.moves.claw, self.moves.heal])\n\n if random.random() < 0.1:\n return random.choice(self.attack_options())\n return self.moves.heal\n\n options = self.attack_options()\n if self.current_hp <= 0.9*self.max_hp:\n options.append(self.moves.heal)\n return random.choice(options)", "def make_move(self, board: Board) -> int:\n return random.choice(board.get_valid_moves())", "def make_random_move(self):\n \n\n if len(self.moves_made) == 56:\n return None\n\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n not_safe_moves = self.moves_made | self.mines\n\n while random_move in not_safe_moves:\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n return random_move", "def chooseRandomly(self, moves):\n # pick a move randomly\n moveIndex = random.randint(0, len(moves) - 1)\n # send it back\n return moves[moveIndex]", "def make_random_move(state: State) -> State:\n return random.choice(state.get_possible_states())", "def select_move(self):\r\n while True:\r\n move = random.randint(0,8)\r\n if self.grid[move][-1] == ' ':\r\n return move", "def random_play(state, player):\n import random\n actions = YoteRules.get_player_actions(state, player, reward_move=state.rewarding_move)\n choice = random.choice(actions)\n return choice", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n 
return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def pick_movement(self, movement, rand_chance=0.1):\n rand_num = np.random.rand()\n move = self.movements[movement]\n move_str = movement\n if rand_num > 1.0 - rand_chance:\n # do random action now\n print(\"----MOVEMENT HAS CHANGED----\")\n move_str = choice(list(self.movements.keys()))\n move = self.movements[move_str]\n # return the movement\n return move, move_str", "def rand_select(board):\n import random\n moves = [move for move, new_board in get_all_next_moves(board)]\n return moves[random.randint(0, len(moves) - 1)]", "def randomMove(board):\r\n go = True\r\n while go:\r\n y = random.randint(0, board.size - 1)\r\n x = random.randint(0, board.size - 1)\r\n go = not board.validMove((y, x))\r\n return (y, x)", "def chooseMove(self):\n\t\tlistOfColumns = [0,1,2,3,4,5,6]\n\t\tresult = random.choice(listOfColumns)\n\t\t\n\t\twhile (self.game.isValid(result+1) != True):\n\t\t\tresult = random.choice(listOfColumns)\n\t\treturn result", "def selectMove(self,q_table, state):\r\n valid_moves = self.game.valid_moves(state)\r\n ep = 0.2\r\n res = random.randint(0,9)\r\n if res < ep*10:\r\n return valid_moves[random.randint(0,len(valid_moves)-1)]\r\n else:\r\n return self.bestMove(q_table,state, valid_moves)", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def random_move(board):\n\tpossible_moves = []\n\tboard_copy = list(board)\n\n\tfor count, player in enumerate(board):\n\t\tif player == ' ':\n\t\t\tpossible_moves.append(count)\n\n\tif len(possible_moves) != 0:\n\t\tmove = random.choice(possible_moves)\n\t\tboard_copy[move] = 'o'\n\n\t\treturn ''.join(board_copy)\n\t\n\telse:\n\t\treturn board", "def person_move(self, pos1=None, pos2=None):\n if self.available_combinations() != []:\n if pos1 is None and pos2 is None:\n comb = random.choice(self.available_combinations())\n self.board[comb[0]][comb[1]] = 5\n else:\n if (pos1, pos2) in self.available_combinations():\n self.board[pos1][pos2] = 5\n else:\n 
print(\"This position is not available\")\n raise TextException\n self.last_move = 5", "def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1", "def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def auto_play_random(self, player=None):\r\n if player is None:\r\n player = self.get_player()\r\n legal_list = self.get_legal_list()\r\n next_move = legal_list.rand_obj()\r\n self.new_edge(next_move)", "def chooseMove(playerBoard, oppBoard, playerSeeds, oppSeeds):\r\n \r\n if SHOW_OUTPUT: print(displayBoard(playerBoard, oppBoard, playerSeeds, oppSeeds))\r\n \r\n moves = getValidMoves(playerBoard, oppBoard)\r\n random.shuffle(moves)\r\n return moves[0]", "def choose_move(self) -> int:\n raise NotImplementedError(\"Choose move not implemented\")", "def make_random_move(self):\n # get copy of the empty board\n board = set([(i, j) for i in range(self.height) for j in range(self.width)])\n\n for move in board:\n if not move in self.moves_made and not move in self.mines:\n return move\n\n return None", "def perform_random_move(grid, move=None):\n zpos = grid.index(0)\n move_functions = [move_up, move_down, move_left, move_right]\n test_functions = [try_up, try_down, try_left, try_right]\n valid_functions = [move_functions[i] for i in [0, 1, 2, 3] if test_functions[i](grid)]\n randnum = random.randint(0, len(valid_functions) - 1)\n return grid, valid_functions[randnum](grid, zpos)", "def choose_move(self):\n raise NotImplementedError()", "def random_place(board, player):\n available = possibilities(board)\n place(board, player, random.choice(available))", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,distance)\n turtle.left(angle)\n turtle.forward(d)", "def biased_choice(self):\n \n move = None\n # checks if this is the first move\n if self.last_move is not None:\n # checks whether to keep moving in the same direction\n if random.random() < self.move_prob:\n move = self.last_move\n # chooses a random move\n if move is None:\n move = random.choice(self.moves)\n return move", "def next_move(self, board, dice):\r\n rulebook = RuleBook(board, self, dice)\r\n legal_plies = rulebook.generate_legal_ply_list()\r\n return random.choice(legal_plies)", "def choose_move(self):\n return 0", "def mc_trial(board, player):\n winner = board.check_win()\n while winner == None:\n next_move = random.choice(board.get_empty_squares())\n board.move(next_move[0], next_move[1], player)\n winner = board.check_win()\n player = provided.switch_player(player)", "def choose_move(self):\r\n \r\n return None", "def choose_move(self, possible_moves, seconds_left):\n # TODO: update 
this method\n print('\\--------------Choose Move--------------/')\n print(possible_moves)\n print(list(self.current_board.legal_moves))\n search_tree = MCTS(5, self.color, self.current_board)\n search_tree.search()\n move = search_tree.pick_move()['move']\n\n return move", "def _move_comp_person(self):\n\n move_tuple = random.choice(self._board.possible())\n self._board[move_tuple] = 'x'", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def mc_trial(board, player):\r\n while(board.check_win() == None):\r\n blankies = board.get_empty_squares()\r\n target = random.choice(blankies)\r\n board.move(target[0],target[1],player)\r\n if player == provided.PLAYERX:\r\n player = provided.PLAYERO\r\n else:\r\n player = provided.PLAYERX", "def make_move(the_board, color):\n legal_moves = the_board.legal_moves(color)\n return random.choice(legal_moves) if len(legal_moves) > 0 else (-1, -1)", "def ai_move(self, player):\n tiles = [t for t in self if self.valid_move(player, t)]\n\n to_max = lambda t: t.maxnum - t.num\n tiles.sort(key=to_max)\n loc = rndchoice( [first(tiles), rndchoice(tiles)] )\n if loc == self.current:\n self.hl_visible = False\n return loc", "def handle_get_action(self, state):\n\n # This is an example player who picks random moves. REMOVE THIS WHEN YOU ADD YOUR OWN CODE !!\n\n #next_move = tuple(self.pick_random_free_cell(\n # state, size=int(math.sqrt(len(state)-1))))\n #############################\n #\n #\n NN_state = self.server_state_to_NN_state(state)\n predictions = self.policy_network.predict([[NN_state]])\n next_move = np.argmax(predictions)\n self.game.set_state(NN_state,1)\n legal_actions = self.game.get_legal_actions()\n if next_move not in legal_actions:\n next_move = np.random.choice(legal_actions,1)\n next_move = self.action_to_tuple_action(next_move)\n\n #\n # next_move = ???\n ##############################\n return next_move", "def player_move():\n\tmove = None\n\twhile move not in moves:\n\t\tmove = raw_input(\"What is your move %s? 
--> \" % name)\n\treturn move", "def next_move():\n move = int(4 * random.random())\n if move == 0:\n return [1, 0]\n elif move == 1:\n return [-1, 0] \n elif move == 2:\n return [0, 1] \n else:\n return [0, -1]", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def getMove(self, board):\r\n self.thisNumTurns += 1\r\n moves = self._getAvailableActions(board)\r\n return moves[random.randint(len(moves))]", "def choose_random_move_from_list(self, board, moves_list):\n possible_moves = []\n for move in moves_list:\n if board.is_position_availible(move):\n possible_moves.append(move)\n if len(possible_moves) > 0:\n return random.choice(possible_moves)\n return None", "def takeNaiveMove():\r\n\tnotFound=True\r\n\twhile notFound:\r\n\t\tmove=random.randint(1,9)\r\n\t\tif validMove(move):\r\n\t\t\tnotFound=False\r\n\treturn move", "def play_move(self, move, player):\n if move in self.available_moves:\n self.available_moves.remove(move)\n self.board[move] = player\n else:\n raise ValueError('Move [{} - {}] not possible.'.format(move, player))", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32", "def _makeAMove(self, prev_move, board: str) -> int:\n # If not overridden, make a random valid move.\n move = choice(validMoves(board))\n return move", "def select_move(self, game, player) -> int:\n print()\n print(f\"Player {player} turn\")\n game.print_player_perspective(player)\n\n move = -1\n while(move==-1):\n entered_move = input (\"Enter move: \")\n\n if(int(entered_move) in game.possible_moves(player)):\n move = int(entered_move)\n else:\n print(\"Entered an invalid move\")\n\n print()\n return move", "def choose_move(self, battle):\n # If the player can attack, it will\n if battle.available_moves:\n # Finds the best move among available ones\n best_move = max(battle.available_moves, key=lambda move: move.base_power)\n return self.create_order(best_move)\n\n # If no attack is available, a random switch will be made\n else:\n return self.choose_random_move(battle)", "def mc_trial(board, player):\n if board.check_win() != None:\n return\n empty_squares = board.get_empty_squares()\n squares = empty_squares[random.randrange(len(empty_squares))]\n board.move(squares[0], squares[1], player)\n mc_trial(board, provided.switch_player(player))", "def choose_absolute_move(self):\n move = self.choose_move()\n if self.player_name == 'A':\n return move\n # Player B, revert the IDs\n return (move + 6) % 12", "def makeMove(self, move, player):", "def cpu_move(moves_list):\n \n move = randint(0,3)\n return move", "def select_move(self, game_state):\n raise NotImplementedError()", "def test_move_default_dropped_steps(self):\n player = ss.LazyPlayer()\n random.seed(2)\n player.move()\n random.seed(5)\n player.move()\n assert 
player.position == 44", "def choose_next(self, round):\n return random.choice(self.possible_coords)", "def move(self, board):\n return self.prng.choice(board.available())", "def set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice", "def getMove(player):\n\n\tsquares = { \"1\":1, \"2\":2, \"3\":3, \"4\":4, \"5\":5, \"6\":6, \"7\":7, \"8\":8, \"9\":9 }\n\tchoice = input(\"Player \" + str(player + 1) + \", pick a square (1-9): \")\n\ttry:\n\t\treturn squares[choice]\n\texcept KeyError:\n\t\tpass", "def mc_trial(board, player):\n while board.check_win() == None:\n emp_sqrs = board.get_empty_squares()\n chosen_sq = random.choice(emp_sqrs)\n board.move(chosen_sq[0], chosen_sq[1], player)\n player = provided.switch_player(player)\n return", "def mc_trial(board, player):\n tmp_player=player\n while board.check_win()==None:\n #print board.check_win()\n empty=board.get_empty_squares()\n #print empty\n square=random.choice(empty)\n #print square\n board.move(square[0],square[1],tmp_player)\n tmp_player=switch_player(tmp_player)\n return", "def play_human_move(self):\n success, info = self.gms.play_human_move(raw_input('Make your next move\\n'.format('')))\n if success:\n print(self.gms.game.get_board_state_pretty())\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n self.play_human_move()\n elif info['status_code'] in [\n core_constants.GAME_STATUS_OVER_DRAW,\n core_constants.GAME_STATUS_OVER_HUMAN_WINNER,\n core_constants.GAME_STATUS_OVER_COMP_WINNER,\n ]:\n print(self.gms.status_code_message_map[info['status_code']])\n else:\n if info['error_code'] == core_constants.ERROR_CODE_INVALID_MOVE:\n self.play_human_move()", "def requestMove(self) -> None:\n\n # player's turn to make a move\n if self.whoseTurn == self.player:\n position: int = int(input(f\"{self.player.getName()}'s turn : \"))\n self.player.insertSymbol(position)\n self.whoseTurn = self.ai\n\n # AI's turn to make a move\n else:\n print(f\"{self.ai.getName()}'s turn\")\n self.ai.makeBestMove()\n self.whoseTurn = self.player", "def choose_random_move(self, battle: AbstractBattle) -> BattleOrder:\n if isinstance(battle, Battle):\n return self.choose_random_singles_move(battle)\n elif isinstance(battle, DoubleBattle):\n return self.choose_random_doubles_move(battle)\n else:\n raise ValueError(\n \"battle should be Battle or DoubleBattle. 
Received %d\" % (type(battle))\n )", "def get_next_move(self):\n return int(input('Enter your move: '))", "def make_move(self, board: Block):\n # select a random block and highlight it.\n rand_block = select_random_block(board)\n rand_block.highlighted = True\n self.renderer.draw(board, self.id)\n pygame.time.wait(TIME_DELAY)\n choice = random.randint(0, 4)\n\n if rand_block.level == rand_block.max_depth or rand_block.level == 0:\n # Random player has chosen to smash an invalid block thus its move\n # is forfeited\n if choice == 4:\n pass\n else:\n perform_move(rand_block, choice)\n else:\n perform_move(rand_block, choice)\n rand_block.highlighted = False\n self.renderer.draw(board, self.id)\n return 0", "def human_v_random(human_player=1):\n if human_player == 1:\n p1_strategy = strategies.Human()\n p2_strategy = strategies.RandomStrategy()\n else:\n human_player = 2\n p2_strategy = strategies.Human()\n p1_strategy = strategies.RandomStrategy()\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n message = 'Welcome to tic tac toe!\\n'+\\\n 'You are playing against a random opponent and you are player '+str(human_player)+'.\\n'+\\\n 'To make a move, enter the number of the square which you would like to play, labelled as:\\n'+\\\n '1, 2, 3\\n4, 5, 6\\n7, 8, 9\\n\\n'\n print(message)\n game = rl_game.Game(p1, p2, board)\n game.play_one()", "def mc_trial(board, player):\n current_player = player\n winner = None\n\n while winner == None:\n empty_spaces = board.get_empty_squares()\n move_space = random.choice(empty_spaces)\n board.move(move_space[0], move_space[1], current_player)\n winner = board.check_win()\n provided.switch_player(current_player)", "def move(self, gstate: gamestate.Gamestate) -> util.Move:\n moves = gstate.legal_moves_vector(gstate.agents[self.id])\n scores = {move: self.evaluate(gstate.copy, move) for move in moves}\n max_score = max(scores.values())\n max_moves = [move for move in moves if scores[move] == max_score]\n return random.choice(max_moves)", "def move(self):\n move = input(\"Pick one weapon - rock, scissors, paper: \").lower()\n while move not in moves:\n \"\"\"Prints out a message to try again\n when something is mistyped or a weapon that is not\n in the moves variable is typed. 
Will keep repeating\n until a validated move is played.\"\"\"\n move = input(\"You can only use rock, scissors, paper: \").lower()\n return move", "def _get_computer_move():\n return choice(choices)", "def mc_trial(board, player):\n empty_squares = board.get_empty_squares()\n \n if board.check_win() == None:\n random_square = random.choice(empty_squares)\n board.move(random_square[0],random_square[1], player)\n player = provided.switch_player(player)\n mc_trial(board, player)", "def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1", "def play_ai(**args):\n board = args.get('board')\n available_moves = [position for position, turn in board.items()\n if turn == \" \"]\n idx = random.randint(0, len(available_moves) - 1)\n return available_moves[idx]", "def move(self, move):\n if \"value\" in move:\n value = move[\"value\"]\n else:\n value = 0\n\n value = max(value, 0)\n self.roll = random.randint(0, self.max)\n\n if value < self.roll:\n self.scores[self.current_player] += value\n if not self.last_round and self.scores[self.current_player] >= 100:\n self.last_round = True\n self.last_player = self.current_player\n else:\n self.current_player = (self.current_player + 1) % self.num_players\n\n self.turn += 1", "def move_monster(monster):\n monster = (clip(monster[0] + randint(-1, 1), 0, X_SIZE-1),\n clip(monster[1] + randint(-1, 1), 0, Y_SIZE-1))\n return monster" ]
[ "0.8052953", "0.7979933", "0.78945196", "0.7858158", "0.7849089", "0.78423244", "0.7794394", "0.77650255", "0.7646826", "0.7567179", "0.7565184", "0.755895", "0.754698", "0.75232786", "0.74944186", "0.74268633", "0.7360817", "0.7327671", "0.73010206", "0.7293377", "0.7293366", "0.72874355", "0.72822696", "0.7241936", "0.7204992", "0.7073883", "0.7052321", "0.7013767", "0.70055175", "0.69977474", "0.6979314", "0.6966382", "0.69531953", "0.69531953", "0.69500095", "0.6943476", "0.6930712", "0.6909821", "0.6880245", "0.6879338", "0.68701434", "0.6861205", "0.67920166", "0.6777001", "0.6776376", "0.67743033", "0.67527306", "0.6730212", "0.67092824", "0.67054546", "0.6689585", "0.6677171", "0.6664721", "0.6650104", "0.66468054", "0.66445166", "0.6635914", "0.6635017", "0.6633581", "0.6623917", "0.6623396", "0.6608373", "0.6601331", "0.6591785", "0.6589891", "0.6574448", "0.65497464", "0.65408856", "0.6518497", "0.65055513", "0.6495545", "0.64843315", "0.648334", "0.64806414", "0.6465066", "0.6453047", "0.6451593", "0.6445988", "0.6437766", "0.64273185", "0.6407055", "0.63892484", "0.63841885", "0.6361205", "0.6353497", "0.6352598", "0.63343596", "0.6328518", "0.63168913", "0.6314204", "0.626235", "0.6261415", "0.62394154", "0.6226994", "0.6216712", "0.62148213", "0.62047946", "0.62002903", "0.61999714", "0.61985" ]
0.7899985
2
Get list of legal moves and return any random one.
def randomMove(self, game):
    #time.sleep(0.25)
    return random.choice(game.get_all_legal_moves())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_legal_move():\n return random.choice(legal_moves())", "def get_random_move(self, valid_moves):\n return random.choice(valid_moves)", "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def move_random(self, board):\n self.get_moves(board.board)\n return random.choice(self.available_moves)", "def make_random_move(self):\n #completely random move\n all_moves = set(itertools.product(range(self.height), range(self.width)))\n moves_left = list(all_moves - self.mines - self.moves_made)\n if not moves_left:\n return None\n return random.choice(moves_left)", "def choose_random_move_from_list(self, board, moves_list):\n possible_moves = []\n for move in moves_list:\n if board.is_position_availible(move):\n possible_moves.append(move)\n if len(possible_moves) > 0:\n return random.choice(possible_moves)\n return None", "def move(self):\r\n his_move = random.randint(0, 2)\r\n return the_moves[his_move]", "def random_strategy(player, board):\n return random.choice(Othello.legal_moves(player, board))", "def make_random_move(self):\n #raise NotImplementedError\n # Take out moves_made as well as mines detected\n self.available_cells = self.available_cells - self.moves_made - self.mines\n available_cells = self.available_cells.copy()\n\n # I'll first try and see if there's any move not within the nearby of\n # The mines, I think this can maximise survivability in some cases\n # It'll still work even if didn't do the below\n for sentence in self.knowledge:\n available_cells -= sentence.cells\n #print(sentence)\n #print(self.mines)\n\n # Making a random move\n length = len(available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n\n length = len(self.available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(self.available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n return None", "def make_random_move(self):\n \n\n if len(self.moves_made) == 56:\n return None\n\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n not_safe_moves = self.moves_made | self.mines\n\n while random_move in not_safe_moves:\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n return random_move", "def __get_next_random_move(self, game_state):\n return random.choice(self.__get_free_seats(game_state))", "def make_random_move(self):\n # get copy of the empty board\n board = set([(i, j) for i in range(self.height) for j in range(self.width)])\n\n for move in board:\n if not move in self.moves_made and not move in self.mines:\n return move\n\n return None", "def make_random_move(self):\n s=set()\n for i in range(self.height):\n for j in range(self.width):\n s.add((i,j))\n\n s=s-self.mines-self.moves_made\n if s==set(): return None\n return random.choice(list(s))\n #raise NotImplementedError", "def random_move(self):\n available_idx = self.get_empty_cells(self.game_board)\n return random.choice(available_idx)", "def chooseRandomly(self, moves):\n # pick a move randomly\n moveIndex = random.randint(0, len(moves) - 1)\n # send it back\n return moves[moveIndex]", "def move(self):\n if self.learn is None:\n return random.choice(moves)\n else:\n return self.storedmove", "def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, 
CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves", "def get_legal_moves(self):\n\n return self._legal_moves", "def move(self):\r\n their_move = self.last_moves[\"their_move\"]\r\n return (their_move == \"\" and random.choice(moves) or their_move)", "def cpu_move(moves_list):\n \n move = randint(0,3)\n return move", "def make_move(the_board, color):\n legal_moves = the_board.legal_moves(color)\n return random.choice(legal_moves) if len(legal_moves) > 0 else (-1, -1)", "def chooseMove(self, game):\n return self.randomMove(game)", "def rand_select(board):\n import random\n moves = [move for move, new_board in get_all_next_moves(board)]\n return moves[random.randint(0, len(moves) - 1)]", "def move(self, gstate: gamestate.Gamestate) -> util.Move:\n moves = gstate.legal_moves_vector(gstate.agents[self.id])\n scores = {move: self.evaluate(gstate.copy, move) for move in moves}\n max_score = max(scores.values())\n max_moves = [move for move in moves if scores[move] == max_score]\n return random.choice(max_moves)", "def getMove(self, board):\r\n self.thisNumTurns += 1\r\n moves = self._getAvailableActions(board)\r\n return moves[random.randint(len(moves))]", "def next_move():\n move = int(4 * random.random())\n if move == 0:\n return [1, 0]\n elif move == 1:\n return [-1, 0] \n elif move == 2:\n return [0, 1] \n else:\n return [0, -1]", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. 
Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def GenerateMoves(position):\n return [move for move in POSSIBLE_MOVES if move <= position]", "def random_move(board):\n\tpossible_moves = []\n\tboard_copy = list(board)\n\n\tfor count, player in enumerate(board):\n\t\tif player == ' ':\n\t\t\tpossible_moves.append(count)\n\n\tif len(possible_moves) != 0:\n\t\tmove = random.choice(possible_moves)\n\t\tboard_copy[move] = 'o'\n\n\t\treturn ''.join(board_copy)\n\t\n\telse:\n\t\treturn board", "def random_player(board_state, _):\n moves = list(available_moves(board_state))\n return random.choice(moves)", "def next_move(self, board, dice):\r\n rulebook = RuleBook(board, self, dice)\r\n legal_plies = rulebook.generate_legal_ply_list()\r\n return random.choice(legal_plies)", "def randomMove(board):\r\n go = True\r\n while go:\r\n y = random.randint(0, board.size - 1)\r\n x = random.randint(0, board.size - 1)\r\n go = not board.validMove((y, x))\r\n return (y, x)", "def __find_all_moves(self, tower) -> list:\r\n choice = []\r\n for height in range(1,len(tower.tower)-2):\r\n for index in range(1,4):\r\n if self.stat_brain.is_valid(height, index, tower):\r\n choice.append((height, index))\r\n \r\n r.shuffle(choice)\r\n return choice", "def choose_move(self, possible_moves, seconds_left):\n # TODO: update this method\n print('\\--------------Choose Move--------------/')\n print(possible_moves)\n print(list(self.current_board.legal_moves))\n search_tree = MCTS(5, self.color, self.current_board)\n search_tree.search()\n move = search_tree.pick_move()['move']\n\n return move", "def make_random_move(state: State) -> State:\n return random.choice(state.get_possible_states())", "def legalMoves(self):\n moves = []\n indexOfZero = self.tiles.index(0)\n \n if indexOfZero == 0:\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 1:\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 2:\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 3:\n moves.append('Up')\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 4:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 5:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 6:\n moves.append('Up')\n moves.append('Right')\n elif indexOfZero == 7:\n moves.append('Up')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 8:\n moves.append('Up')\n moves.append('Left')\n else:\n print('something wrong with board')\n return moves", "def move(self):\r\n my_move = self.last_moves[\"my_move\"]\r\n return (my_move != \"\" and moves[(moves.index(my_move)+1) % 3] or\r\n random.choice(moves))", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != 
self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def choose_move(self): # pylint: disable=too-many-branches,too-many-return-statements\n if self.current_mana < 10: # Only usable move\n return self.moves.teleport\n\n if self.game.player.current_hp <= 10 and self.current_mana >= self.moves.claw.mana_cost:\n return self.moves.claw\n if self.game.player.current_hp <= 20:\n return self.moves.glide\n if self.game.player.current_hp <= 30:\n if self.current_mana < 50:\n options = {self.moves.teleport: 3, self.moves.glide: 6}\n elif self.current_mana <= 140:\n options = {self.moves.teleport: 1, self.moves.glide: 2, self.moves.claw: 6}\n else:\n options = {self.moves.glide: 2.3333333333, self.moves.claw: 6.6666666667}\n if self.current_hp <= 180:\n options[self.moves.heal] = 1\n return self.random_weighted(options)\n\n if self.current_hp < 25:\n if self.current_mana < 50:\n return self.random_weighted({self.moves.teleport: 0.1, self.moves.glide: 0.1, self.moves.heal: 0.8})\n if self.game.player.current_hp <= 40:\n return random.choice([self.moves.claw, self.moves.heal])\n\n if random.random() < 0.1:\n return random.choice(self.attack_options())\n return self.moves.heal\n\n options = self.attack_options()\n if self.current_hp <= 0.9*self.max_hp:\n options.append(self.moves.heal)\n return random.choice(options)", "def get_moves(self, board):\n self.available_moves = [move for move in board.legal_moves]", "def legal_moves_generator(self, custom=False):\r\n possible_moves = self.null_positions\r\n possible_moves.add('PASS')\r\n temp_state = np.array(self.state)\r\n illegal_moves = set()\r\n for pos in possible_moves:\r\n illegal = True\r\n if pos != 'PASS':\r\n ortho = ORTHOGONAL_POSITIONS[(pos[0], pos[1])]\r\n for p in ortho:\r\n if self.state[p[0]][p[1]] == 0:\r\n illegal = False\r\n break\r\n elif self.to_move != self.board[p[0]][p[1]].color:\r\n if self.board[p[0]][p[1]].liberty == 1:\r\n illegal = False\r\n break\r\n\r\n elif self.state[p[0]][p[1]] == self.to_move:\r\n if self.board[p[0]][p[1]].liberty > 1:\r\n illegal = False\r\n break\r\n if illegal:\r\n illegal_moves.add(pos)\r\n temp_state = np.array(self.state)\r\n continue\r\n\r\n for p in ortho:\r\n if self.to_move != self.board[p[0]][p[1]].color:\r\n if self.board[p[0]][p[1]].liberty == 1:\r\n temp_state[p[0]][p[1]] = 0\r\n\r\n temp_state[pos[0]][pos[1]] = self.to_move\r\n if (temp_state == self.previous_state).all(): # KO RULE CHECK\r\n illegal_moves.add(pos)\r\n temp_state = np.array(self.state)\r\n continue\r\n temp_state = np.array(self.state)\r\n\r\n possible_move_pos = possible_moves - illegal_moves\r\n if custom:\r\n return possible_move_pos\r\n\r\n legal_moves_queue = PriorityQueue()\r\n\r\n for possible_move in possible_move_pos:\r\n move_obj = Move(possible_move, self.to_move, self)\r\n legal_moves_queue.put((-move_obj.priority, move_obj))\r\n return legal_moves_queue", "def find_legal_move(self, g, func, timeout=None):\r\n legal_col = []\r\n for index, col in enumerate(g.col_lst):\r\n if col < g.BOARD_COL - 1:\r\n legal_col.append(index)\r\n chosen_col = random.sample(legal_col, k=1)[0]\r\n if len(legal_col) == 0:\r\n raise Exception(self.MSG_NO_AI_MOVES)\r\n func(chosen_col)\r\n return chosen_col", "def get_move(moves):\n pass", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = 
self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def takeNaiveMove():\r\n\tnotFound=True\r\n\twhile notFound:\r\n\t\tmove=random.randint(1,9)\r\n\t\tif validMove(move):\r\n\t\t\tnotFound=False\r\n\treturn move", "def get_possible_moves(self) -> list:\n if self.p1_turn:\n name = '2'\n else:\n name = '1'\n\n count = 0\n for i in self.claim:\n if i == name:\n count += 1\n over = count >= 0.5 * len(self.claim)\n\n moves = []\n if not over:\n for i in self.letters:\n if i.isalpha():\n moves.append(i)\n return moves", "def make_move(self, board: Board) -> int:\n return random.choice(board.get_valid_moves())", "def get_valid_moves(self):\r\n # castling and en-passant rights are stored, because move affects these values\r\n temp_enpassant_possible = self.enpas_pos\r\n temp_castle = CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs)\r\n\r\n # for validating a possible move\r\n #1 all possibile moves are generated\r\n #2 each pos moves are made\r\n #3 generate opponent move\r\n #4 check if any of those moves let the king attacked\r\n #5 moves which let the king in chess are eliminated\r\n #6 the moves are undone\r\n moves = self.get_all_possible_moves() # 1\r\n\r\n # castle moves are directly introduced in valid moves\r\n if not self.turn_white:\r\n self.get_castle_moves(self.bKingPos[0], self.bKingPos[1], moves)\r\n else:\r\n self.get_castle_moves(self.wKingPos[0], self.wKingPos[1], moves)\r\n\r\n for i in range(len(moves) - 1, -1, -1): # 2\r\n self.make_move(moves[i])\r\n # 3 #4\r\n self.turn_white = not self.turn_white\r\n if self.in_check():\r\n moves.remove(moves[i]) # 5\r\n self.turn_white = not self.turn_white\r\n self.undo_move()\r\n\r\n # game ending possibilities\r\n if len(moves) == 0:\r\n if self.in_check():\r\n self.checkMate = True\r\n print(\"Checkmate !\")\r\n else:\r\n self.staleMate = True\r\n print(\"Stalemate !\")\r\n else:\r\n self.checkMate = False\r\n self.staleMate = False\r\n\r\n # the rigths are restored, and the values are not affected\r\n self.enpas_pos = temp_enpassant_possible\r\n self.cr_castle_r = temp_castle\r\n\r\n return moves", "def random_move(self, num_of_moves=1, validate=False):\n for i in range(num_of_moves):\n # check if legal move exists\n if len(self.legal_moves_in_uci) == 0:\n return None\n # get random move\n action = np.random.choice(self.legal_moves_in_uci)\n # apply move\n self.make_move(action, validate=validate)", "def get_legal_moves(self, pos: Position, game_board: GameBoard) -> PossibleMoveSet:\n pass", "def get_legal_moves(self, player):\r\n move_list = []\r\n if self._phase == GamePhase.SETUP:\r\n return self._setup_legal_moves(player)\r\n elif self._phase == GamePhase.MOVE:\r\n return self._move_legal_moves(player)\r\n elif self._phase == GamePhase.BUILD:\r\n return self._build_legal_moves(player)\r\n return move_list", "def computer_move():\n\tmove = 
random.choice(moves)\n\tprint \"Computer's move is %s\" % move\n\treturn move", "def legal_moves(player, board):\n return [sq for sq in Othello.squares() if Othello.is_legal(sq, player, board)]", "def choose_move(self, battle):\n # If the player can attack, it will\n if battle.available_moves:\n # Finds the best move among available ones\n best_move = max(battle.available_moves, key=lambda move: move.base_power)\n return self.create_order(best_move)\n\n # If no attack is available, a random switch will be made\n else:\n return self.choose_random_move(battle)", "def make_safe_move(self):\n #iterate through safe moves until you find one that has not yet been played\n for move in self.safes:\n if move not in self.moves_made:\n return move\n #If we make it through the end of the list, return None\n return None", "def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves", "def get_all_possible_moves(self, state):\n move_list = []\n done_finding_moves = False\n any_non_pass_moves = False\n while not done_finding_moves:\n try:\n m = next(self.move_generator) # Gets a (move, state) pair.\n # print(\"next returns: \",m[0]) # Prints out the move. For debugging.\n if m[0] != 'p':\n any_non_pass_moves = True\n move_list.append(m) # Add the move to the list.\n except StopIteration as e:\n done_finding_moves = True\n if not any_non_pass_moves:\n move_list.append(('p',state))\n return move_list", "def chooseMove(self):\n\t\tlistOfColumns = [0,1,2,3,4,5,6]\n\t\tresult = random.choice(listOfColumns)\n\t\t\n\t\twhile (self.game.isValid(result+1) != True):\n\t\t\tresult = random.choice(listOfColumns)\n\t\treturn result", "def get_next_moves(board, player):\r\n\r\n if player == 'hare':\r\n moves = []\r\n next_moves = []\r\n\r\n (row_from, col_from) = get_hare_positions(board)\r\n moves = possible_moves_list(row_from, col_from)\r\n\r\n for move in moves:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_from, col_from, row_to, col_to):\r\n \"\"\" if move is allowed then add to list of next moves\"\"\"\r\n next_moves.append(move)\r\n\r\n return next_moves\r\n\r\n else:\r\n \"\"\" for individual hounds\r\n get next moves\"\"\"\r\n moves = []\r\n next_moves_hound1 = []\r\n next_moves_hound2 = []\r\n next_moves_hound3 = []\r\n\r\n (row_hound_1, col_hound_1), (row_hound_2, col_hound_2), (row_hound_3, col_hound_3) = get_hound_positions(board)\r\n moves_hound1 = possible_moves_list(row_hound_1, col_hound_1)\r\n moves_hound2 = possible_moves_list(row_hound_2, col_hound_2)\r\n moves_hound3 = possible_moves_list(row_hound_3, col_hound_3)\r\n\r\n for move in moves_hound1:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_1, col_hound_1, row_to, col_to):\r\n next_moves_hound1.append(move)\r\n\r\n for move in moves_hound2:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_2, col_hound_2, row_to, col_to):\r\n next_moves_hound2.append(move)\r\n\r\n for move in moves_hound3:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_3, col_hound_3, row_to, col_to):\r\n 
next_moves_hound3.append(move)\r\n\r\n return (next_moves_hound1, next_moves_hound2, next_moves_hound3)", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def get_legal_moves(self, current_player):\n\t\tlegal_moves = []\n\t\tfor row in range(self.board_size):\n\t\t\tfor col in range(self.board_size):\n\t\t\t\tif self.board.repr[row][col] == self.player_symbol[current_player]:\n\t\t\t\t\tposition = (row,col)\n\t\t\t\t\tmove_fn_list = [self.north_move,\n\t\t\t\t\t\t\t\t self.east_move,\n\t\t\t\t\t\t\t\t self.south_move,\n\t\t\t\t\t\t\t\t self.west_move]\n\t\t\t\t\tfor move_fn in move_fn_list:\n\t\t\t\t\t\tmove = move_fn(position)\n\t\t\t\t\t\tif self.is_legal_move(current_player,move):\n\t\t\t\t\t \t\tlegal_moves.append(move)\n\t\t\t\t\t \t\t# now we are going to check for a double jump!\n\t\t\t\t\t \t\tstart = move[0]\n\t\t\t\t\t \t\tcur_end = move[1]\n\t\t\t\t\t \t\tnew_board = copy.deepcopy(self.board)\t# Make a copy of the board, and then make the move on that board\n\t\t\t\t\t \t\tnew_board.movePiece(start,cur_end)\n\t\t\t\t\t \t\tcontinue_move = move_fn(cur_end)\t\t# Try to move again in the same direction\n\t\t\t\t\t \t\tnew_game_state = Game(self.board_size,new_board,current_player)\t\t\t# make a whole new game state and check if our move is legal on that \n\t\t\t\t\t \t\twhile(new_game_state.is_legal_move(current_player, continue_move)):\n\t\t\t\t\t \t\t\tstart_cur = cur_end\n\t\t\t\t\t \t\t\tcur_end = continue_move[1]\n\t\t\t\t\t \t\t\tlegal_moves.append((start,cur_end))\n\t\t\t\t\t\t \t\tnew_board = copy.deepcopy(new_board)\n\t\t\t\t\t \t\t\tnew_board.movePiece(start_cur,cur_end)\n\t\t\t\t\t \t\t\tcontinue_move = move_fn(cur_end)\n\t\t\t\t\t \t\t\tnew_game_state = Game(new_game_state.board_size,new_board,current_player)\n\t\treturn legal_moves", "def get_possible_moves(self):\n moves = []\n for i in range(1, self.current_total + 1):\n if i ** 2 <= self.current_total:\n moves.append(i ** 2)\n\n return moves", "def random_next_action(state):\n\n possible_moves = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n possible_moves.append((i, j))\n return random.choice(possible_moves)", "def get_move(self, game, time_left):\n legal_moves = game.get_legal_moves()\n if not legal_moves:\n return (-1, -1)\n return legal_moves[randint(0, len(legal_moves) - 1)]", "def perform_random_move(grid, move=None):\n zpos = grid.index(0)\n move_functions = [move_up, move_down, move_left, move_right]\n test_functions = [try_up, try_down, try_left, try_right]\n valid_functions = [move_functions[i] for i in [0, 1, 2, 3] if test_functions[i](grid)]\n randnum = random.randint(0, len(valid_functions) - 1)\n return grid, valid_functions[randnum](grid, zpos)", "def generate_valid_moves(self):\n #make sure we have a valid roll\n if (self.roll != (0,0)):\n #if doubles, need to do 4 moves\n if (self.roll[0] == self.roll[1]):\n #need to seed the initial moveset\n mv = self.board.find_moveable_pieces(self.roll[0], self.player)\n mv2 = []\n #apply the remaining 3 rolls\n for i in 
range(0,3):\n for mboard in mv:\n mv2.extend(mboard.find_moveable_pieces(self.roll[0], self.player))\n mv = list(set(mv2))\n mv2 = []\n else:\n #need to condisider d1 then d2 and d2 then d1\n d1d2 = self.board.find_moveable_pieces(self.roll[0], self.player)\n d2d1 = self.board.find_moveable_pieces(self.roll[1], self.player)\n d1d2_2 = []\n d2d1_2 = []\n for mboard in d1d2:\n d1d2_2.extend(mboard.find_moveable_pieces(self.roll[1], self.player))\n for mboard in d2d1:\n d2d1_2.extend(mboard.find_moveable_pieces(self.roll[0], self.player))\n mv = d1d2_2\n mv.extend(d2d1_2)\n self.moves = list(set(mv))", "def get_available_moves(self, board):\n pass", "def chooseMove(playerBoard, oppBoard, playerSeeds, oppSeeds):\r\n \r\n if SHOW_OUTPUT: print(displayBoard(playerBoard, oppBoard, playerSeeds, oppSeeds))\r\n \r\n moves = getValidMoves(playerBoard, oppBoard)\r\n random.shuffle(moves)\r\n return moves[0]", "def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = 
\"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def availablemoves(moves):\n useravailablemoves = []\n for move in moves:\n useravailablemoves.append(move['move'])\n return(useravailablemoves)", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n \n options = game.get_legal_moves()\n assert options == legal_moves, \"Mismatched moves\"\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n score, move = None, random.choice(legal_moves) if len(legal_moves) > 0 else None\n try:\n # Iterative deepening with Quiessance search:\n if self.iterative is True:\n results = deque(maxlen=3)\n for depth in range (self.search_depth, 25):\n score, move = self.dosearch(game, depth)\n results.append((score, move))\n if self.quiessant_search is True:\n if len(results) >=3 and all(x[1] == move for x in results):\n break\n elif score == float('-inf') or score == float ('inf'):\n break\n if self.time_left() < self.TIMER_THRESHOLD:\n break\n else:\n score, move = self.dosearch(game, self.search_depth)\n assert score is not None\n \n if len (options) > 0:\n assert not (move is None or move is (-1,-1)), \"Move ({}, {}) for '{}/{}' cannot be None or (-1,-1) if options ({}) exist\".format(move, score, self.method, self.score, options)\n assert move in options, \"Move ({}, {}) for '{}/{}' not from existing list of moves ({})\".format(move, score, self.method, self.score, options)\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n # Return the best move from the last completed search\n # (or iterative-deepening search iteration)\n return move", "def random_moves(length):\n ans = \"\"\n for dummy_num in range(length):\n ans += random.choice([\"u\",\"d\",\"l\",\"r\"])\n return ans", "def get_available_moves(self):\n available = []\n row, col = tuple(self.current_pos)\n if row - 1 >= 0 and self.maze[row - 1][col] != 'x':\n available.append('n')\n if row + 1 < len(self.maze) and self.maze[row + 1][col] != 'x':\n available.append('s')\n if col - 1 >= 0 and self.maze[row][col - 1] != 'x':\n available.append('w')\n if col + 1 < len(self.maze[row]) and self.maze[row][col + 1] != 'x':\n available.append('e')\n return available", "def prepare_next_turn(grid):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return any_possible_moves(grid)", "def exploring_starts(self):\n def random_choice(l): return l[np.random.randint(len(l))]\n return map(random_choice, (self.env.states, self.env.moves))", "def ai_move():\n\tinitial_state = map(get_filled_edges, rects)\n\tpossible_moves = []\n\tfor index, filled_edges in enumerate(initial_state):\n\t\tif filled_edges == 0:\n\t\t\tpossible_moves.extend([(index, i) for i in 'ltrb'])\n\t\telif filled_edges == 1:\n\t\t\tpossible_moves.extend(one_filled_edge(index))\n\t\telif 
filled_edges == 2:\n\t\t\tpossible_moves.extend(two_filled_edge(index))\n\t\telif filled_edges == 3:\n\t\t\tpossible_moves.extend(three_filled_edge(index))\n\tprint possible_moves\n\tpossible_decisions = []\n\tfor move in possible_moves:\n\t\tfinal_state = apply_move(move)\n\t\tpossible_decisions.append(is_feasible(initial_state, final_state))\n\tprint possible_decisions\n\t# randomizing when some decisions have the same weight\n\tmax_weight = max(possible_decisions)\n\t# list of indices which have the same weight\n\tmax_indices = []\n\tfor index, weight in enumerate(possible_decisions):\n\t\tif weight == max_weight:\n\t\t\tmax_indices.append(index)\n\tx = choice(max_indices)\n\tprint x\n\treturn possible_moves[x]\n\t# return possible_moves[possible_decisions.index(max(possible_decisions))]", "def get_moves(self):", "def play_ai(**args):\n board = args.get('board')\n available_moves = [position for position, turn in board.items()\n if turn == \" \"]\n idx = random.randint(0, len(available_moves) - 1)\n return available_moves[idx]", "def get_goat_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_goat():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def get_move(self, board, possible_moves):\n next_move = None\n max_score = -float('Inf')\n self.start_time = datetime.now()\n for depth in range(2,3): # iterative deepening\n try:\n for move in possible_moves:\n board_copy = deepcopy(board)\n self.man.play_move(board_copy, move, self.color)\n score = self.minimaxm(depth, board, False)\n if score > max_score:\n max_score = score\n next_move = move\n\n except TimeoutError:\n print(\"ran out of time\")\n break\n return next_move", "def getAction(self, state):\n # collect legal moves and successor states\n legalMoves = state.getLegalActions()\n\n # choose one of the best actions\n scores = [self.evaluationFunction(state, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n\n chosenIndex = random.choice(bestIndices) # pick randomly among the best\n\n return legalMoves[chosenIndex]", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, 
possible_move_list", "def getAction(self, gameState):\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState.generateSuccessor(0, action))\n for action in legalMoves if action != 'Stop']\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices)\n\n return legalMoves[chosenIndex]", "def getAction(self, gameState):\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n if 'Stop' in legalMoves:\n legalMoves.remove('Stop')\n numFood = len(gameState.getFood().asList())\n ghoststates = gameState.getGhostStates()\n ghost = ghoststates[0]\n timer = ghost.scaredTimer\n if timer > 5:\n newPosits = [gameState.generatePacmanSuccessor(action).getPacmanPosition() for action in legalMoves]\n scores = [manhattanDistance(pos, ghost.getPosition()) for pos in newPosits]\n bestScore = min(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n for ind in bestIndices:\n if legalMoves[ind] == ghost.getDirection():\n return legalMoves[ind]\n return legalMoves[random.choice(bestIndices)]\n\n\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n if numFood > 10:\n for ind in bestIndices:\n if (legalMoves[ind] == 'South') or (legalMoves[ind] == 'West'):\n return legalMoves[ind]\n\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n\n\n\n\n return legalMoves[chosenIndex]", "def get_random_move(game, ship, args = None):\n\n if args is None:\n args = {}\n\n moves = args[\"moves\"] if \"moves\" in args else [\"n\", \"s\", \"e\", \"w\"]\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - ship {} Getting random move with moves = {} ... \".format(ship.id, moves))\n\n move = random.choice(moves)\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} move: {}\".format(ship.id, move))\n\n new_position = ship.position.directional_offset(DIRECTIONS[move])\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} new_position: {}\".format(ship.id, new_position))\n\n normalized_position = game.game_map.normalize(new_position)\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} normalized_position {}\".format(ship.id, normalized_position))\n\n cell = game.game_map[normalized_position]\n\n #\n # collision resolution\n #\n if cell.is_occupied:\n remaining_moves = [x for x in moves if move not in x]\n if DEBUG & (DEBUG_NAV): logging.info(\"ship {} collided with ship {} at {} while moving {}. 
Remaining_moves: {}\".format(ship, cell.ship, normalized_position, move, remaining_moves))\n\n game.collisions.append((ship, cell.ship, move, normalized_position, resolve_random_move)) # args = remaining moves\n return None\n\n #\n # success\n #\n cell.mark_unsafe(ship)\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - ship {} Getting random move {}\".format(ship.id, move))\n\n return move", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def getAction(self, gameState):\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n \"Add more of your code here if you want to\"\n self.previousLoc = gameState.getPacmanPosition()\n return legalMoves[chosenIndex]", "def get_move(self, game, legal_moves, time_left):\n logging.debug(\"get_move - legal moves: %s\", str(legal_moves))\n \n self.time_left = time_left\n\n\n # Check if we have any legal moves\n if not legal_moves:\n return (-1, -1)\n\n # Let's set best move so far to be the first legal move so we always \n # have something to return in case of timeout\n self.best_move_so_far = legal_moves[0]\n \n \n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n try:\n # The search method call (alpha beta or minimax) should happen in\n # here in order to avoid timeout. 
The try/except block will\n # automatically catch the exception raised by the search method\n # when the timer gets close to expiring\n if self.iterative:\n it = 1\n while True:\n if self.method == 'minimax':\n _, self.best_move_so_far = self.minimax(game, it)\n else:\n _, self.best_move_so_far = self.alphabeta(game, it)\n it += 1\n else: \n if self.method == 'minimax':\n _, self.best_move_so_far = self.minimax(game, self.search_depth)\n else:\n _, self.best_move_so_far = self.alphabeta(game, self.search_depth)\n\n except Timeout:\n # Handle any actions required at timeout, if necessary\n logging.debug(\"Time is up - get_move returning: %s\", str(self.best_move_so_far))\n return self.best_move_so_far\n\n # Return the best move from the last completed search iteration\n logging.debug(\"get_move returning: %s\", str(self.best_move_so_far))\n\n return self.best_move_so_far", "def prepare_next_turn(grid):\n\tempties = get_empty_cells(grid)\n\ty,x = random.choice(empties)\n\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn any_possible_moves(grid)", "def _policy(self, gameboard):\r\n valid_moves = self._all_valid_moves(gameboard)\r\n _reflex_ = Reflex(self.color)\r\n best_move = None\r\n moves = []\r\n \r\n # step 1, check going to win\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_going_to_win(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n\r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n \r\n # step 2, check opponent 4\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_._alter_check_opponent_4(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n \r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 3, check opponent 3\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_opponent_3(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n \r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 4, winning blocks\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_winning_blocks(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n\r\n if len(moves) > 0:\r\n moves = list(set(moves))\r\n moves.sort(key=lambda x: x[2], reverse=True)\r\n max_count = moves[0][2]\r\n new_moves = []\r\n\r\n for t in moves:\r\n if t[2] < max_count:\r\n break\r\n else:\r\n new_moves.append((t[0], t[1]))\r\n\r\n moves = new_moves.copy()\r\n\r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 5, random pick one\r\n idx = np.random.choice(len(valid_moves), 1)[0]\r\n return valid_moves[idx]", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n if not (self.field_row*self.color_value == 1 or self.field_row*self.color_value == -6):\n self.pot_moves = {(1*self.color_value, 0)}\n\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n\n for move in 
self.pot_capture_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def legal_moves(board,player=None):\r\n \r\n possible_moves = []\r\n moves = []\r\n if player == None:\r\n moves += board.white + board.black\r\n elif player == -1:\r\n moves += board.black\r\n elif player == 1:\r\n moves += board.white\r\n \r\n captured = False\r\n for pos in moves:\r\n if pos[0] == 'A':\r\n m = [-8,-7,1,8,9]\r\n elif pos[0] == 'H':\r\n m = [-9,-8,-1,7,8]\r\n else:\r\n m = [-9,-8,-7,-1,1,7,8,9]\r\n loc = decode(pos)\r\n for i in m:\r\n captured = capture(board, player, possible_moves, pos, loc, i)\r\n canter(board, player, possible_moves, pos, loc, i)\r\n plain(board, player, possible_moves, pos, loc, i)\r\n \r\n if captured:\r\n enemy_list = []\r\n for capturing_move in possible_moves:\r\n if len(capturing_move) == 3:\r\n enemy_list.append(capturing_move)\r\n possible_moves = list(enemy_list)\r\n\r\n return possible_moves", "def get_legal_nearby_moves(self, nearby_length=1):\n moves = []\n for row, col in self.get_legal_moves():\n if not self._is_nearby_empty(nearby_length, row, col):\n moves.append((row, col))\n\n return moves or None", "def all_valid_moves(self, cur_board, all_english_words):\r\n rack_to_str = self.convert_rack_to_str()\r\n if len(rack_to_str) == 0:\r\n return [] \r\n \r\n moves_made = cur_board.find_words_on_board()\r\n actual_words = []\r\n\r\n if cur_board.board_empty: #no one has played yet.\r\n return Utils.get_all_legal_combos([\"\"], rack_to_str, all_english_words)\r\n\r\n\r\n\r\n \r\n for move in moves_made:\r\n actual_words.append(move[0]) #the first element is a string, representing the actual move made.\r\n \r\n \r\n return Utils.get_all_legal_combos(actual_words, rack_to_str, all_english_words)", "def test_get_legal_moves():\n board = Board(*TEST_AGRU2)\n comp = Computer(board, COMP_DISK, HUMAN_DISK)\n comp.b.columns_list = [\n [HUMAN_DISK],\n [HUMAN_DISK, HUMAN_DISK],\n [HUMAN_DISK, HUMAN_DISK, HUMAN_DISK]\n ]\n comp.b.new_disk = (MOVE2)\n assert set(comp.get_legal_moves(comp.b)) == {(1, 0), (0, 1)}", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves" ]
[ "0.82790005", "0.820212", "0.764488", "0.75532055", "0.7404592", "0.73601913", "0.7347978", "0.7325695", "0.73243225", "0.73121375", "0.7256089", "0.7213734", "0.71593404", "0.71414167", "0.7133645", "0.70529675", "0.7036325", "0.69154537", "0.68977284", "0.6889334", "0.6877339", "0.6869339", "0.68677616", "0.6859729", "0.6856743", "0.68455684", "0.6840623", "0.6817695", "0.6815715", "0.6802779", "0.6771906", "0.67643565", "0.674307", "0.6736671", "0.67350876", "0.67282665", "0.6725472", "0.67133904", "0.6704036", "0.6702333", "0.67011386", "0.6692917", "0.6681731", "0.6672752", "0.66651356", "0.6664212", "0.6655499", "0.66295385", "0.6623405", "0.65842485", "0.65813994", "0.657228", "0.6564013", "0.65555054", "0.6516633", "0.65052027", "0.6500642", "0.64814806", "0.64543086", "0.64535564", "0.6421758", "0.64139783", "0.6413895", "0.6412467", "0.640926", "0.6396823", "0.6380706", "0.6378354", "0.6377497", "0.6372028", "0.6361672", "0.63566613", "0.63470304", "0.6343224", "0.6332308", "0.6330632", "0.63286763", "0.6318139", "0.631065", "0.6292032", "0.62882686", "0.6279322", "0.62778556", "0.62465394", "0.6243622", "0.6241709", "0.6232415", "0.62315476", "0.6220981", "0.6210431", "0.6209929", "0.6192076", "0.6187246", "0.61837614", "0.6167054", "0.61664057", "0.61659014", "0.61559325", "0.6151719", "0.6151215" ]
0.80969596
2
Very important. Lets the UI know NOT to give AI player UI control.
def isHuman(self):\n    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_control(self):\n pass", "def isUIused():\n return False", "def __handle_view_player(self, gamestate_component):", "def __disableControls(self):\n self.ignoreAll()", "def noyable(self):\n return False", "def control_plugin(self):\n pass", "def _control_skip(self):\n self.player.skip()", "def game_play(self):", "def hide_gui():\n pass", "def do_nothing(self, player):\n return '%s spins \\'nun\\' and does nothing.' % (player,)", "def auto_play(self):\n raise NotImplementedError(self)", "def change_player_state(self):\n if self.active_player.get() is True:\n # Get game phase and unlock respective buttons?\n # or should game do that\n pass\n else:\n pass\n #self.disable_all_buttons()", "def playerForfeit(self):\n self.handleWin(self.currentplayer*-1)", "def nothing_playing(self):\n self.state.set_active_player(None)", "def pass_player(self):\n # pass control to next player by asking game who that is\n self.disable_all_buttons()\n self.game.pass_control_next(self)", "async def __check_ai_turn(self) -> None:\n if self.get_current_player().is_ai:\n await asyncio.sleep(2.0)\n await self.__auto_flip_tile()", "def utility(self, state, player):\r\n raise NotImplementedError", "def test_nonVisibilityUnaffected(self):\n self.assertEquals(\n list(self.observer.idea.obtain(\n idea.Proximity(3, idea.ProviderOf(iimaginary.IThing)))),\n [self.observer, self.location, self.rock]\n )", "def set_acquisition_gui(self):\r\n self.startButton.setEnabled(False)\r\n for widget in self.enabled_input_widgets:\r\n widget.setEnabled(False)\r\n self.abortButton.setEnabled(True)\r\n self.flipPumpOnlyPumpProbeButton.setEnabled(True)", "def test_nonVisibilityAffected(self):\n self.assertEquals(visibles(self.observer.idea, iimaginary.IThing), [])\n # XXX need another test: not blocked out from ...", "def disableButtons(self):\n self.ui.b_run.setEnabled(False)\n self.ui.b_colour.setEnabled(False)\n self.ui.b_ground_truth.setEnabled(False)\n self.ui.b_vid.setEnabled(False)\n self.ui.b_save.setEnabled(False)\n self.ui.t_low.setEnabled(False)\n self.ui.t_high.setEnabled(False)\n self.ui.t_fps.setEnabled(False)\n self.ui.combo_superpixel.setEnabled(False)\n self.ui.c_super_pixel_video.setEnabled(False)\n self.ui.c_csv.setEnabled(False)\n self.ui.c_draw.setEnabled(False)\n self.ui.c_velocity.setEnabled(False)\n self.ui.c_of.setEnabled(False)\n self.ui.c_back_of.setEnabled(False)\n self.ui.c_depth.setEnabled(False)\n self.ui.c_speed_plot.setEnabled(False)\n self.ui.c_error_plot.setEnabled(False)\n self.ui.c_crash_plot.setEnabled(False)\n self.ui.c_error_plot_video.setEnabled(False)\n self.ui.c_speed_plot_video.setEnabled(False)\n self.ui.c_crash_plot_video.setEnabled(False)\n self.ui.c_optimize.setEnabled(False)\n self.ui.c_object_detection.setEnabled(False)", "def isGUI(self):\n return True", "def behaviors_paused(self) -> bool:", "def user_controlled(self) -> None:\n self.cpu_controlled = False", "def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n ai_settings.initialize_dynamic_settings()\n #hiding mouse cursor\n start_game(ai_settings, screen, stats, ship, aliens, bullets)\n\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()", "def NoPrompt(self) -> bool:", "def newPlayer():\r\n pass", "def player_moves_player(self, x, y):\n activePlayer = self.get_active_player()\n if activePlayer.humanControlled:\n super(RobotGame, 
self).player_moves_player(x, y)", "def is_hidden():\n return False", "def is_hidden():\n return False", "def _handleInput(self):\n\n Game.Player.running(Game.ControlState[Game.MoveRight], not (Game.ControlState[Game.MoveRight] == Game.ControlState[Game.MoveLeft]))\n Game.Player.jumping(Game.ControlState[Game.Jump])\n Game.Player.flying(Game.ControlState[Game.Fly])\n Game.Player.firing(Game.ControlState[Game.Fire])", "def is_ignored(self):", "def test_if_ui_instance_well_initialized(self):\n ui = UIRender(TestUI.image_path)\n self.assertEqual(ui.run, True)\n self.assertIsInstance(ui.clock, type(pygame.time.Clock()))\n self.assertEqual(ui.game_mode, \"UNKNOWN\")\n self.assertEqual(ui.bottom_player_color, 0)\n self.assertEqual(ui.top_player_color, 0)", "def update(self):\r\n if not self.tr.game_over and self.tr.turn_tracker:\r\n self.computer_play()", "def doNotTrack(self):\n # return False\n return 'lol'", "def disable(self):", "def disable(self): \n self.feed_button.config(state=\"disabled\")\n self.eat_button.config(state=\"disabled\") \n for t in range(self.player.game.trait_limit): \n self.add_trait_buttons[t].config(state=\"disabled\") \n self.add_population_button.config(state=\"disabled\")\n self.add_body_size_button.config(state=\"disabled\")", "def prepare_UI(self):", "def control():\n pass", "def exitNone(self):\n self.myStoryMap.title['state'] = DGG.DISABLED", "def disable(self) -> None:", "def player(self):\n legal = self.board.legal_move(self.black)\n if(len(legal) == 0):\n self.p_no_move = 1\n print(\"No legal move for player!\")\n self.computer_turn = True\n self.player_turn = False", "def is_visible(self):", "def is_actor():\n return False", "def hidden():\n return False", "def __handle_view_win_condition(self, gamestate_component):", "def test_empty_ui(self):", "def disable_game():\n global frames\n for x in frames:\n for y in x.winfo_children():\n y.config(state='disabled')", "def play(ai, human_player=None):\n\n # If no player order set, choose human's order randomly\n if human_player is None:\n human_player = random.randint(0, 1)\n\n # Create new game\n game = Nim()\n\n # Game loop\n while True:\n\n # Print contents of piles\n print()\n print(\"Piles:\")\n for i, pile in enumerate(game.piles):\n print(f\"Pile {i}: {pile}\")\n print()\n\n # Compute available actions\n available_actions = Nim.availableActions(game.piles)\n time.sleep(1)\n\n # Let human make a move\n if game.player == human_player:\n print(\"Your Turn\")\n while True:\n pile = int(input(\"Choose Pile: \"))\n count = int(input(\"Choose Count: \"))\n if (pile, count) in available_actions:\n break\n print(\"Invalid move, try again.\")\n\n # Have AI make a move\n else:\n print(\"AI's Turn\")\n pile, count = ai.chooseAction(game.piles, use_epsilon=False)\n print(f\"AI chose to take {count} from pile {pile}.\")\n\n # Make move\n game.move((pile, count))\n\n # Check for winner\n if game.winner is not None:\n print()\n print(\"GAME OVER\")\n winner = \"Human\" if game.winner == human_player else \"AI\"\n print(f\"Winner is {winner}\")\n return", "def none(self):", "def disable_btns(self):\n self.saveBtn.setEnabled(False)\n self.openVideoBtn.setEnabled(False)\n self.openAnnotationBtn.setEnabled(False)\n self.resetBtn.setEnabled(False)\n self.speedCombo.setEnabled(False)\n self.newFileBtn.setEnabled(False)\n self.HelpBtn.setEnabled(False)", "def check_trying_using(self):\r\n if self.opportunity or 'key' in inventory:\r\n if self.rect.colliderect(player):\r\n music_acceptor.usingPortalSound()\r\n player.rect.x = 
random.randrange(75, WIDTH - 125)\r\n player.rect.y = random.randrange(25, HEIGHT - 100)", "def beforeUpdate(self):", "def untargeted(self):\n\t\tpass", "def isInit(this):\n\t\treturn not not this._CAP\n\t\t# Who's here ?\n\t\t# - Me, I kill you.", "async def pause_behaviors(self) -> None:", "def ignore(self, event):\n return not self.active", "def hide_main_buttons(self):\n pass", "def thisIsIA():\r\n global player\r\n cases_disp = getCaseDisp()\r\n ia_choice = random.randint(0,len(cases_disp)-1)\r\n # choice = cases_disp[ia_choice] # pour aleatoire\r\n choice = simulaIA()[0] #min max implementation\r\n #print(choice)\r\n Play(choice[0],choice[1],player)\r\n newPlayer()\r\n winner = Victoire() # on stock le resultat actuel \r\n if (winner or MatchNul()):\r\n Dessine(winner) #on dessine\r\n Window.update() # maj de la fenetre avant la fin du tour de tkinter\r\n Window.after(3000) # pause de 3secondes\r\n ResetGame(winner) #on met les cases a 0\r\n Dessine(winner) # on dessine la couleur du gagnant\r\n return\r\n Dessine(winner) # on dessine le jeu\r", "def play_game():\n pass", "def __init__(self, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)", "def won(self):\r\n return None", "def not_test_without_user(self):\n # TODO", "def player_movement(self):", "def player(network, event) :\n\twhile event.is_set() :\n\t\t_, _, (V, P) = network.act()\n\t\t# print V, P\n\t\tnetwork.env.render()\n\t\tif (network.env.done) :\n\t\t\tnetwork.reset_game()\n\t\ttime.sleep(0.1)", "def wait_to_play(self):\n\n\t\tself.player_model.current_player = self.player_model.rival_player\n\t\tself.player_frame.prepare_to_wait_turn(self.player_model.rival_player.name, self.player_model.available_cells)", "def modifyComponentsNotPreferableOnServer(self):\n # Nothing to do\n pass", "def still_deciding(self):\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n return True\n return False", "def play(self):\n pass", "async def game(self):\n pass", "def user_control(board, x_player, y_player, button_pressed, inventory):\n\n red = '\\033[31m'\n reset_color = '\\033[0m'\n item_colors = {\n '●': '\\033[33m', '⚛': '\\033[34m', '✿': '\\033[31m', '✡': '\\033[94m',\n '♦': '\\033[32m', 'ᴥ': '\\033[31m', '☀': '\\033[33m'}\n place_on_right_side = board[y_player][x_player + 1]\n place_on_left_side = board[y_player][x_player - 1]\n place_on_up_side = board[y_player - 1][x_player]\n place_on_down_side = board[y_player + 1][x_player]\n places_prohibited_to_stand_on = [\n 'X', red + '#' + reset_color, '☹', '☃', '♞', '☻', '☬', item_colors['☀'] + '☀' + reset_color, red\n + '☀' + reset_color]\n\n if button_pressed == 'd' and place_on_right_side not in places_prohibited_to_stand_on:\n x_player += 1\n elif button_pressed == 'a' and place_on_left_side not in places_prohibited_to_stand_on:\n x_player -= 1\n elif button_pressed == 'w' and place_on_up_side not in places_prohibited_to_stand_on:\n y_player -= 1\n elif button_pressed == 's' and place_on_down_side not in places_prohibited_to_stand_on:\n y_player += 1\n\n friends = ['☹', '☃', '♞', '☻', '☬']\n # conditions for level 4 (feeding friends)\n if button_pressed == 'd' and place_on_right_side in friends and inventory['●'] > 19:\n x_player += 1\n elif button_pressed == 'a' and place_on_left_side in friends and inventory['●'] > 19:\n x_player -= 1\n elif button_pressed == 'w' and place_on_up_side in friends and inventory['●'] > 19:\n y_player -= 1\n elif button_pressed == 's' and place_on_down_side in friends and inventory['●'] > 19:\n 
y_player += 1\n return x_player, y_player", "def set_not_ready(self):\n if self.game.has_started() or self.status == self.PLAYER_NOT_READY:\n return\n self.status = self.PLAYER_NOT_READY", "def preliminary_check_controls(self):\n\n # is the program still in a binding state?\n if self.is_binding:\n self.error_msg['text'] = 'You are still binding'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # are the controls set all unique?\n elif len({\n self.controller.slide_up_control,\n self.controller.slide_down_control,\n self.controller.slide_left_control,\n self.controller.slide_right_control\n }) != 4:\n self.error_msg['text'] = 'All controls must be unique'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # all tests passed?\n else:\n # save to file - do this\n\n # move to main menu frame\n self.controller.show_frame(MainMenu)", "def game_allowed(self, uid=0):\n return True", "def setup_game(self):", "def pause(self):\n self.entry['state']=DGG.DISABLED\n self.ignoreAll()", "def __init__(self, player):\n self.player = player", "def play(self, player, game): \n super().play(player, game)\n game.set_action(\"SLEEP_CODER\")", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def interact(self):\r\n pass", "def __setup_ui_controls(self):\n self.scene.append_to_caption('\\n')\n\n # Button to reset camera\n btn_reset = button(\n bind=self.__reset_camera, text=\"Reset Camera\")\n self.__ui_controls.btn_reset = btn_reset\n self.scene.append_to_caption('\\t')\n\n chkbox_cam = checkbox(\n bind=self.__camera_lock_checkbox,\n text=\"Camera Lock\", checked=self.__camera_lock)\n self.__ui_controls.chkbox_cam = chkbox_cam\n self.scene.append_to_caption('\\t')\n\n chkbox_rel = checkbox(\n bind=self.__grid_relative_checkbox,\n text=\"Grid Relative\", checked=self.__grid_relative)\n self.__ui_controls.chkbox_rel = chkbox_rel\n self.scene.append_to_caption('\\n\\n')\n\n # Button to clear the screen\n btn_clr = button(bind=self.clear_scene, text=\"Clear Scene\")\n self.__ui_controls.btn_clear = btn_clr\n self.scene.append_to_caption('\\n\\n')\n\n # Checkbox for grid visibility\n chkbox_grid = checkbox(\n bind=self.__grid_visibility_checkbox, text=\"Grid Visibility\",\n checked=self.__grid_visibility)\n self.__ui_controls.chkbox_grid = chkbox_grid\n self.scene.append_to_caption('\\t')\n\n # Prevent the space bar from toggling the active checkbox/button/etc\n # (default browser behaviour)\n self.scene.append_to_caption('''\n <script type=\"text/javascript\">\n $(document).keyup(function(event) {\n if(event.which === 32) {\n event.preventDefault();\n }\n });\n </script>''')\n # https://stackoverflow.com/questions/22280139/prevent-space-button-from-triggering-any-other-button-click-in-jquery\n\n # Control manual\n controls_str = '<br><b>Controls</b><br>' \\\n '<b>PAN</b><br>' \\\n 'SHFT + LMB | <i>free pan</i><br>' \\\n 'W , S | <i>up / down</i><br>' \\\n 'A , D | <i>left / right</i><br>' \\\n '<b>ROTATE</b><br>' \\\n 'ARROWS KEYS | <i>rotate direction</i><br>' \\\n 'Q , E | <i>roll left / right</i><br>' \\\n '<b>ZOOM</b></br>' \\\n 'MOUSEWHEEL | <i>zoom in / out</i><br>' \\\n '<script type=\"text/javascript\">var arrow_keys_handler = function(e) {switch(e.keyCode){ case 37: case 39: case 38: case 40: case 32: e.preventDefault(); break; default: 
break;}};window.addEventListener(\"keydown\", arrow_keys_handler, false);</script>' # noqa\n # Disable the arrow keys from scrolling in the browser\n # https://stackoverflow.com/questions/8916620/disable-arrow-key-scrolling-in-users-browser\n self.scene.append_to_caption(controls_str)", "def event_beforehide(self):\n logging.warning('beforehide undefined')", "def start_of_game(self):\n pass", "def play(self, player, game):\n super().play(player, game)\n game.set_action(\"PICKUP_CODER\")", "def __init__(self):\n\t\tself.playercolider()", "async def skip(self):\n await self.play()", "def _check_play_button(self, mouse_pos):\n\n # If the player clicks the play button AND the game isn't going\n if self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\n\n # reset the game stats and dynamic settings\n self.stats.reset_stats()\n self.settings.initialize_dynamic_settings()\n self.stats.game_active = True\n self.sb.prep_score()\n\n # get rid of any remaining aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n\n # recenter player\n self.ship.center_ship()\n\n # hide the mouse cursor\n pygame.mouse.set_visible(False)", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def testPlaybackMechanism(self):\n\t\tx = BaseAction('x')\n\t\tself.failIf(x.playbackPolicy.hasBeenPlayedBack)\n\t\tself.failIf(x.playbackPolicy.isReadyForRemoval)\n\t\tx.playback()\n\t\tself.failUnless(x.playbackPolicy.hasBeenPlayedBack)\n\t\tself.failUnless(x.playbackPolicy.isReadyForRemoval)", "def testOnePlaybacksIsTheDefault(self):\n\t\tpolicy = MinimumPlaybackPolicy()\n\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)\n\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)", "def _inactive(self):\n self._click()\n if self._last is None and self._touch is not None:\n self._state = STATE_COUNTDOWN\n self._game = Gameplay()\n self._last = self._touch", "def skip_connect_your_computer_screen(self):\n if self.driver.wait_for_object(\"connect_your_computer_title\", timeout=10, raise_e=False):\n self.driver.click(\"connect_your_computer_not_now_btn\")", "def player_clicked(self, player):\n self.chosen_player = player\n self.setEnabled(False)\n for (_, _, button) in self.buttons:\n button.setEnabled(False)\n dialog = OpenByPlanetName(self, player)\n dialog.exec()\n self.setEnabled(True)\n for (_, _, button) in self.buttons:\n button.setEnabled(True)", "def allowNoneInternalLabel(self):\n return self.isAllowedInternalLabel(None)", "def allowNoneInternalLabel(self):\n return self.isAllowedInternalLabel(None)", "def noCondition(self):\n result = Activatable(self.effects).canActivate(self.game)\n self.assertTrue(result, \"The Activatable should be activatable\")", "def gameTic(self):\n boxId = -1\n\n if self.currentplayer == 1:\n boxId = self.player1.play(self.gameState, self.currentplayer)\n if self.currentplayer == -1:\n boxId = self.player2.play(self.gameState, self.currentplayer)\n\n if self.validmove(boxId):\n self.makemove(boxId)\n self.checkWin()", "def disable_buttons(self):\n\t\tself.cancel.set_sensitive(False)\n\t\tself.logout.set_sensitive(False)\n\t\tself.suspend.set_sensitive(False)\n\t\tself.reboot.set_sensitive(False)\n\t\tself.shutdown.set_sensitive(False)", "def show_playing(self):\n\n print(\"show_playing needs implementation\")", "def _control_pause(self):\n self.player.pause()" ]
[ "0.629158", "0.62601733", "0.62392414", "0.6203768", "0.6019735", "0.60026574", "0.59875256", "0.5963308", "0.595724", "0.59545267", "0.59274596", "0.59116316", "0.5896117", "0.5860238", "0.5832094", "0.5797094", "0.57889205", "0.57613266", "0.571739", "0.5679264", "0.5675429", "0.5662583", "0.5655375", "0.56169254", "0.5610672", "0.5609555", "0.56041515", "0.55936646", "0.55880404", "0.55880404", "0.5574266", "0.556823", "0.55670154", "0.55629855", "0.5555833", "0.5548501", "0.5546592", "0.5535596", "0.55251276", "0.55204356", "0.55142206", "0.5507858", "0.55011296", "0.54925174", "0.54917514", "0.54883033", "0.54823995", "0.5469816", "0.54558647", "0.54518723", "0.5442624", "0.54334915", "0.54273957", "0.5426118", "0.54202497", "0.5418611", "0.54184014", "0.54157394", "0.54146475", "0.5402984", "0.54025334", "0.54014474", "0.5397464", "0.539295", "0.538574", "0.5382525", "0.53824043", "0.53816295", "0.5371021", "0.5358866", "0.53580356", "0.53534836", "0.53481674", "0.53445727", "0.5337659", "0.5336691", "0.5327632", "0.5324662", "0.5323198", "0.5321092", "0.5319133", "0.5317378", "0.5316104", "0.53090537", "0.53035814", "0.53015566", "0.5297945", "0.52978563", "0.52978563", "0.5295064", "0.5289694", "0.5280506", "0.5276249", "0.52729553", "0.52676487", "0.52676487", "0.52587414", "0.52547526", "0.5252954", "0.5250054", "0.524544" ]
0.0
-1
String representation for a random player. Used for writing results filenames.
def __str__(self):\n    return "{}_random".format(self.index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n \n is_random_print = \"\"\n if self.is_random == True:\n is_random_print = \"randomly\"\n else:\n is_random_print = \"deterministically\"\n\n return \"Player for \" + self.side + \", ply = \" + str(self.ply) + \", breaks ties \" + is_random_print", "def __repr__(self):\n if self.type == Player.HUMAN:\n return(\"Human\")\n elif self.type == Player.RANDOM:\n return (\"Random\")\n elif self.type == Player.MINIMAX:\n return (\"Minimax\")\n elif self.type == Player.ABPRUNE:\n return (\"ab Pruning\")\n elif self.type == Player.CUSTOM:\n return \"Q-Learner\"\n elif self.type == Player.MIX:\n return \"MIX\"", "def __repr__(self):\n return str(self._player) + str(self._num)", "def __str__(self):\n return \"Player: {}\".format(self.name)", "def to_string(self):\n return \"Moving randomly\"", "def __str__(self):\n return \"player: \" + str(self.player) + \"\\nposition: \" + str(self.position) + \"\\naccepted: \" + str(self.accepted) + \"\\ndirections enclosing: \" + str(self.directions_enclosing) + \"\\nfinal: \" + str(self.final)", "def __repr__(self):\n return 'Player({!r}, {!r}, {!r})'.format(\n self.name,\n self.hand,\n self.score\n )", "def getPlayerFormat(self):\r\n return self.player + \"\\t\"", "def __str__(self) -> str:\n return '{}'.format(self.letter if self.player == 0 else self.player)", "def __repr__(self):\n return '<Player %s: %s>' % (self.username, repr(self.stats()))", "def __repr__(self):\n s = \"Player for \" + self.ox + \"\\n\"\n s += \" with tiebreak type: \" + self.tbt + \"\\n\"\n s += \" and ply == \" + str(self.ply) + \"\\n\\n\"\n return s", "def __repr__(self):\n return f\"Player({self.hand}, {self.total}, {self.aceCount}, {self.dealer})\"", "def __repr__(self):\n s = 'Player ' + self.checker\n\n return s", "def _player_info(self):\n return \"%r %s seat:%s m:%r c:%s b:%s \" % (self.name, self.serial, self.seat, self.money, self._chips, self._bet)", "def __str__(self):\n return self.playername", "def getPlayerFilename(self):\n if (self.__playerName != \"???\"):\n return self.__filename\n else:\n return \"\"", "def __str__(self):\n res = \"Opponent: \" + str(self.opponent) + \" Angle: \" + str(self.angle)\n return res", "def __str__(self):\n return f\"This player has {self.hand} for a current total of {self.total} and {self.aceCount} Aces \" \\\n f\"valued at a soft 11. 
This player is a dealer: {self.dealer}.\"", "def __repr__(self):\n return \"Player('{}', {})\".format(self.name, self.tendency.__repr__())", "def __repr__(self):\n name = self.name\n prefix = \": \"\n gen = (\n value for value in self.init_kwargs.values() if value is not None\n )\n for value in gen:\n try:\n if issubclass(value, Player):\n value = value.name\n except TypeError:\n pass\n name = \"\".join([name, prefix, str(value)])\n prefix = \", \"\n return name", "def __repr__(self):\n out = ''\n out += f'\\nPlayer {self.number}: {self.name}\\n'\n\n # checks for trail options before printing.\n if len(self.trail_options) > 0:\n out += f'\\nTrail Options:\\n'\n for item in self.trail_options:\n out += f' {item}'\n else:\n out += f'\\nSadly, {self.name} is out of trail options.\\n'\n\n # checks for supplies before printing.\n if len(self.supplies) > 0:\n out += f'\\nSupplies:\\n'\n for item in self.supplies:\n out += f' {item[0]}\\n'\n else:\n out += f'\\nSadly, {self.name} is out of supplies.\\n'\n\n return out", "def __repr__(self):\n\n return f\"Player(name={self.name}, score={self.total_points})\"", "def announce_player(self) -> str:\n return f\"Player Name: {self.name}, Team name: {self.team}, Number: {str(self.number)}\"", "def __repr__(self):\r\n s = 'Player ' + self.checker + ' (' + self.tiebreak + ', ' + str(self.lookahead) + ')'\r\n return s", "def __str__(self):\n result = \", \".join(map(str, self.hand))\n result += \"\\n \" + str(self.get_score()) + \" points\"\n return result", "def __repr__(self):\r\n c = \"Player \" + self.checker + \" (\" + self.tiebreak + \", \" + str(self.lookahead) + \")\"\r\n return c", "def stringify(self):\n string = self.chars[\"type\"] + \" \"\n \n # current hearts\n for _ in range(self.hearts):\n string += self.chars[\"heart\"]\n\n # dead hearts\n for _ in range(3 - self.hearts):\n string += self.chars[\"noheart\"]\n\n return string", "def nice_output(self):\n return 'Pitch: {0} at {1}: {2}'.format(\n self.pitch_type, self.start_speed, self.des)", "def generate_results_string(player_list, singular_result, plural_result):\n string = \"\"\n plural = len(player_list) > 1\n player_number = 1\n if len(player_list) != 0:\n string += \"Player \"\n for player in player_list:\n string += player.get_name()\n if player_number < len(player_list) - 1:\n string += \", \"\n elif player_number < len(player_list):\n string += \" & \"\n player_number += 1\n if plural:\n string = string[:6] + \"s\" + string[6:] + plural_result\n else:\n string += singular_result\n return string", "def __str__(self):\n if self.showOneCard:\n return str(self.cards[0])\n else:\n return Player.__str__(self)", "def __str__(self):\n return (\"UUID: \" + str(self.uuid) + \"\\n\"\n \"Data: \" + str(self.data) + \"\\n\" +\n \"Tex: \" + str(self.texOutput) + \"\\n\")", "def getDescription(self):\n return \"GGP Players (*.player)\"", "def __str__(self):\n return \"UID {0}, Key {1}, Cipher {2}, PRNG {3}\".format(hex(self.uid), \n hex(self.key), hex(self.cipher), hex(self.prng))", "def __repr__(self):\r\n s = 'Player ' + str(self.checker)\r\n v = ' ('+ self.tiebreak+', '+str(self.lookahead)+')'\r\n s += v\r\n return s", "def __repr__(self):\n stringrepr = self.__class__.__name__ + \" PRNG. 
seed: \" + \\\n str(self.baseseed) + \" counter: \" + str(self.counter) + \\\n \" randbits_remaining: \" + str(self.randbits_remaining)\n return stringrepr", "def str_players_with_hand(self):\n message = \"Players and their hands\\n\\n\" + self.bold_message(self.dealer.str_with_hand()) + \"\\n\"\n for player in self.players:\n if isinstance(player, user.User):\n message += player.str_with_hand() + \"\\n\"\n return message", "def __str__(self):\n return \"{} : {}\".format(self._team_name, self._win_ratio)", "def __str__(self):\n return Hand.__str__(self) + '\\nHand Rank: ' + self.get_full_label()", "def to_json(self):\n player = {\n 'name': self.name,\n 'colour': self.colour,\n 'gender': self.gender,\n 'uid': self.UID,\n 'position': self.position,\n 'money': self.money,\n 'ready': self.is_ready(),\n }\n return player", "def __str__(self):\n return f'Character name: {self.name}\\nhealth: {self.health}\\n' \\\n f'strength: {self.strength}\\nchance dodge: ' \\\n f'{round(self.chance_dodge, 2)}\\nchance critical:' \\\n f' {round(self.chance_critical, 2)} '", "def to_string(self):\n\n return '[[%s], [%s]], [%d, %d], [%s], %s, %s, [%s]' % \\\n (', '.join(INT2STRING_CARD[h] for h in self.hand[0]),\n ', '.join(INT2STRING_CARD[h] for h in self.hand[1]),\n self.pot[0], self.pot[1],\n ', '.join(INT2STRING_CARD[p] for p in self.pub),\n INT2STRING_PHASE[self.phase],\n INT2STRING_PLAYER[self.player],\n ', '.join(INT2STRING_STATUS[s] for s in self.status))", "def name(self) -> str:\n try:\n return self.stats[\"Player name\"]\n except KeyError as ke:\n logger.debug(ke, exc_info=True)\n logger.warn(\"unable to get player name\")\n return \"\"", "def __str__(self):\n struct_repr = \", \".join([\n \"w: \" + str(self.w),\n \"x: \" + str(self.x),\n \"y: \" + str(self.y),\n \"z: \" + str(self.z)\n ])\n\n return f\"Quaternion: [{struct_repr}]\"", "def __str__(self):\n if self._rank is None:\n rank_str = \"\"\n else:\n rank_str = str(self._rank + 1)\n\n if self._file is None:\n file_str = \"\"\n else:\n file_str = chr(self._file + 97)\n\n return file_str + rank_str", "def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name", "def __str__(self):\n string = \"Hand contains \"\n h = self.hand\n \n for i in range(len(h)):\n string += str(h[i].get_suit()) + str(h[i].get_rank()) + \" \"\n \n return string", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def __str__(self):\n return str(self.rank)+str(self.suit)", "def __str__(self):\n\n if self.sampling is not None:\n strme = \"move volume {} {} {}\"\\\n .format(self.key, self.sampling, self.pfreq)\n else:\n strme = \"move volume {} {}\".format(self.key, self.pfreq)\n\n return strme", "def player_id(self):\n \n return 'audio-player-%s' % hash((time.time(),))", "def printPlayerStats(self):\n\t\tplayerStats = ['Name = ' + self.name, \n\t\t\t\t\t 'Agility = ' + str(self.agility), \n\t\t\t\t\t 'Personality = ' + str(self.personality), \n\t\t\t\t\t 'Sanity = ' + str(self.sanity), \n\t\t\t\t\t 'Strength = ' + str(self.strength), \n\t\t\t\t\t 'Progress = ' + str(self.progress)]\n\t\tprint playerStats", "def generate_message(self):\n\t\tmsg = \"\"\n\t\tfor idx, player in enumerate(self.players, start=1):\n\t\t\tmsg += f\"Player {idx} - {player.display_name}\\n\"\n\t\tmsg += (\n\t\t\tf\"\\nClick the `Join Game` button to join. Up to {self.max_players} players can join. 
\"\n\t\t\t\"To start with less than that many, use the `Start Game` button to begin.\"\n\t\t)\n\t\treturn msg", "def __str__(self):\n return \"{} of {}\".format(self.rank,self.suit)", "def __repr__(self):\r\n return \"Username: \" + str(self.username) + \\\r\n \", Video count in metadata: \" + str(self.video_count) + \\\r\n \", Videos IDs found: \" + str(len(self.videos))", "def __repr__(self) -> str:\n return f'{self.name}|{self.hp}|{self.mp}'", "def __repr__(self) -> Any:\n game_board = self.__str__() + \"\\n\"\n current_player_info = \"Is p1 the current player? \" + str(self.p1_turn)\n result = game_board + current_player_info\n return result", "def __repr__(self):\n repr = \"<BBPlayer %s at %s>\" % (self.name, hex(id(self)))\n return repr", "def player(self):\n # type: () -> string_types\n return self._player", "def __str__(self):\n return str(self.rank) + \" of \" + self.suit", "def __str__(self):\n string = 'input dim: {} \\noutput dim: {} \\n'.format(\n self.dim_inputs, self.dim_outputs\n )\n string += 'sequence length: {} \\n'.format(\n self.tensors[0].shape[1]\n )\n key = 'train' if self.train else 'test'\n string += '{}_samples: {} \\n{}_sequences: {} \\n'.format(\n key, self.experiment_length, key, self.tensors[0].shape[0]\n )\n return string", "def __str__(self):\n return '<TuebingenMEG: %i samples, %i timepoints, %i channels>' \\\n % (self.nsamples, self.ntimepoints, len(self.channelids))", "def single_temp() -> str:\n return '36.' + str(random.randint(1, 5))", "def getOpponentFormat(self):\r\n return self.opponent + \"\\t\"", "def __str__(self):\n return f\"{self.rank.title()} of {self.suit.title()}\"", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def random_teampreview(self, battle: AbstractBattle) -> str:\n members = list(range(1, len(battle.team) + 1))\n random.shuffle(members)\n return \"/team \" + \"\".join([str(c) for c in members])", "def gen_dynstring(self):\n # a timestamp plus something semi random\n return '%s.%s' % (time.strftime('%m%d%H%M%S', (time.localtime())),\n random.randint(1, 100000))", "def __str__(self):\n return ', '.join([self.yftf_data, self.info_hash, str(self.num_pieces), str(self.peers)])", "def _get_rand_str(self):\n rand_components = []\n for key, (min_val, max_val) in self.randomargs.items():\n val = random.uniform(min_val, max_val)\n if type(min_val) is int and type(max_val) is int:\n val = int(val)\n rand_components.append(f\"{key}={str(val)}\")\n return \",\".join(rand_components)", "def board_string(self, players):\n if len(self.user_guess) == 1:\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += \"-------------------\\n\"\n\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0].get_name()}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1].get_name()}: {self.user_guess[1]}, {self.applied_guess[1]}\\n\"\n board += \"-------------------\\n\"\n\n return board", "def __str__(self):\n prob = str(round(self.probability, 5))\n dprob = str(round(self.postdProbability, 5))\n output = \"dprob: \" + dprob + \" \\tprob: \" + prob + \"\\t: \"\n for key in self.attackDict.keys():\n output += key + \" \"\n return output", "def name(who):\r\n if who == 0:\r\n return 'Player 0'\r\n elif who == 1:\r\n return 'Player 1'\r\n else:\r\n return 'An unknown player'", 
"def getName(self):\n\n return self.player", "def __str__(self):\n return '{0} of {1}'.format(Card.rank_names[self.rank], Card.suit_names[self.suit])", "def __str__(self):\n return '{0} of {1}'.format(Card.rank_names[self.rank], Card.suit_names[self.suit])", "def get_name(self):\n return self._player_name", "def get_current_player_name(self) -> str:\n if self.p1_turn:\n return 'p1'\n return 'p2'", "def to_string(self):\n return self.dungeon_string", "def __str__(self):\n return f\"{self.face} of {self.suit} with a value of {self.value}\"", "def __str__(self):\r\n return self.suit + self.rank", "def __str__(self):\n return(' Spot\\n'\n '\\tPosition:\\n'\n f'\\tX: {self.x}\\n'\n f'\\tY: {self.y}\\n'\n f'\\tZ: {self.z}\\n'\n '\\tRotation\\n'\n f'\\tX: {self.rotX}\\n'\n f'\\tY: {self.rotY}\\n'\n f'\\tZ: {self.rotZ}\\n'\n )", "def __str__(self):\n return \"Name: \" + self._name + \"\\nScores: \" + \\\n \" \".join(map(str, self._scores))", "def to_str(self) -> str:\n private = self.privacy == 1\n owners: List[str] = []\n for o in self.owners:\n user = self.bot.get_user(o)\n owners.append(user.name if user else \"Unknown user\")\n return f\"Wormhole: {self.name}\\n┗━▷ Private: {private} - Admins: {', '.join(owners)} - **{self.channels}** Discord channels are linked\"", "def __repr__(self):\n return \"\\nSprite info: \" + self.name + \"\\nx = {0}\\ny = {1}\\nhealth = {2}\\nstrength = {3}\\nloot = {4}\\n\"\\\n .format(self.x, self.y, self.health, self.strength, self.loot)", "def __str__(self):\n #Create dictionary for face cards\n translate = {11:'Jack', 12:'Queen', 13:'King', 14: 'Ace'}\n r = self._rank\n #check for face card\n if r in [11, 12, 13, 14]:\n myrank = translate[r]\n else:\n myrank = str(r)\n return myrank + \" of \" + self._suit", "def human_readable_info(self) -> str:\n next_session = unix_str(self._stat.next_session)\n last_session = unix_str(self._stat.last_session)\n return \"\"\"\n Next Session: {}\n Last Session: {}\n Repetitions: {}\n Health: {}\n ------------------------\n Past Quality (last 20):\n ------------------------\n {}\n \"\"\".format(\n next_session,\n last_session,\n self._stat.actual_repetitions,\n self._health(),\n self._past_quality_graph(),\n )", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def __str__(self):\n if self._active_player:\n def piece_to_index(piece):\n return (piece & 0xF)\n else:\n def piece_to_index(piece):\n return (piece & 0xE) | (0 if piece & 1 else 1)\n\n return '\\n'.join(map(\n lambda posY, row: ''.join(map(\n lambda posX, piece: self.EMOJI[\n piece_to_index(piece)\n if piece else\n 14 + ((posY + posX) % 2)],\n count(), row)),\n count(),\n self.board if self._active_player else reversed(\n [reversed(row) for row in self.board])))", "def __setPlayerFilename(self):\n if self.__playerName != \"???\":\n l=self.__playerName.rsplit(\" \")\n nameWithoutSpaces=\"_\".join(l)\n self.__filename = fileLocs.playerProfiles+\"\\\\\"+nameWithoutSpaces+r\".p\"", "def __repr__(self: object) -> str:\n measstring: str = \"Tatort - {:04d} - {} - {} - {} - {}\".format(self.episode_id, self.episode_name, self.episode_inspectors, self.episode_sequence, self.episode_broadcast)\n return measstring", "def to_string(self):\n\n if self.player == CHANCE: # is chance\n return ', '.join(INT2STRING_CARD[c] for c in self.deal)\n else: # is not chance\n return INT2STRING_ACTION[self.action]", "def __str__(self):\n out = \"{}.\".format(self.move_number)\n if self.white.san != \"\":\n out += \" \" + 
str(self.white)\n else:\n out += \"..\"\n if self.black.san != \"\":\n out += \" \" + str(self.black)\n if self.comment:\n out += \" {\" + self.comment + \"}\"\n return out", "def __str__(self): \n \n return self.suit + self.rank", "def __str__(self):\n return self._last_opponent", "def to_str(self) -> str:\n perms = (\n \"Write and Read\"\n if self.perms == \"wr\"\n else \"Read\"\n if self.perms == \"r\"\n else \"Write\"\n )\n return f\"Channel: <#{self.channelID}>\\n┗━▷ Linked to **{self.wh}** - Permissions: *{perms}*\"", "def __str__(self):\n string = \"Deck contains \"\n\n for i in range(len(self.deck)):\n string += str(self.deck[i].get_suit()) + str(self.deck[i].get_rank()) + \" \"\n return string", "def __str__(self):\n table_data = [\n ['', 'C', 'G', 'A', 'T'],\n ['total', str(self.total['C']), str(self.total['G']), str(self.total['A']), str(self.total['T'])],\n ['reverse half strand', str(self.reverse['C']), str(self.reverse['G']), str(self.reverse['A']),\n str(self.reverse['T'])],\n ['forward half strand', str(self.forward['C']), str(self.forward['G']), str(self.forward['A']),\n str(self.forward['T'])]\n ]\n table = AsciiTable(table_data)\n return \"Generation #{}\\n\".format(self.epoch) + table.table", "def print_player_info(self):\n\t\tclear_screen()\n\n\t\tprint(\"# PLAYER INFO #\\n\")\n\t\tprint(\"Name{:.>17} \".format(self.info['Name']))\n\t\tprint(\"Race{:.>17} \".format(self.info['Race']))\n\t\tprint(\"Level{:.>16} \".format(self.stats['Level']))\n\t\tprint(\"Hit Points{:.>11} \".format(self.stats['HP']))\n\t\tprint(\"Gold Pieces{:.>10} \".format(self.stats['GOLD']))\n\t\n\t\tpress_enter()", "def generate_producer_name():\n return movie_producer_surnames[random.randint(0, len(movie_producer_surnames) - 1)] + \" \" + movie_producer_surnames[random.randint(0, len(movie_producer_surnames) - 1)]", "def get_player_name(self):\n return self._player_name" ]
[ "0.7373667", "0.71827644", "0.70852", "0.69778293", "0.69040483", "0.68909234", "0.6811406", "0.6797799", "0.6739397", "0.673668", "0.6681964", "0.66097915", "0.6596855", "0.658164", "0.6565075", "0.64829636", "0.6482711", "0.64633006", "0.64110553", "0.6410214", "0.6378095", "0.63684297", "0.629823", "0.6289816", "0.612884", "0.6116562", "0.61127436", "0.6111311", "0.6102965", "0.609162", "0.6085754", "0.6072979", "0.60673195", "0.60605264", "0.6058458", "0.60522413", "0.6039757", "0.60174364", "0.60073346", "0.6006034", "0.60034835", "0.6000418", "0.5998494", "0.598956", "0.59887326", "0.59844667", "0.59764796", "0.5970785", "0.596426", "0.5954844", "0.595103", "0.5948352", "0.59414744", "0.5934815", "0.59150225", "0.5913866", "0.590119", "0.5898231", "0.5889459", "0.5862595", "0.5860943", "0.5852926", "0.58513117", "0.5841902", "0.58329445", "0.5816344", "0.58145046", "0.5809985", "0.57859886", "0.57842934", "0.5781801", "0.57804793", "0.5779944", "0.5776383", "0.5776383", "0.57733095", "0.5768743", "0.57663465", "0.5765477", "0.5757846", "0.57522774", "0.5750185", "0.5737686", "0.5736952", "0.5731932", "0.57289654", "0.5727055", "0.57258224", "0.5724505", "0.5719306", "0.5718246", "0.5716911", "0.5708694", "0.570848", "0.570107", "0.5697704", "0.56835306", "0.568132", "0.5679843", "0.5678637" ]
0.59313565
54
Replace multiple assignment with single assignments.
def visit_Assign(self, node):
    self.generic_visit(node)
    is_multiple = len(node.targets) > 1
    is_compound = any(map(is_sequence_node, node.targets))
    is_simple = not is_compound
    if is_simple and is_multiple:
        return self.visit_simple_assign(node)
    elif is_compound and (is_multiple or is_sequence_node(node.value)):
        return self.visit_compound_assign(node)
    return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with different arities, e.g.,\n # `x,y = a,b,c = ...` is not a syntax error in Python, though it\n # probably should be because it's guaranteed to cause a runtime\n # error. Raise the error here, since we cannot proceed.\n raise SyntaxError(\"Multiple assignment with different arities\")\n nvalues = nvalues.pop()\n\n # Assign temporary variables.\n temps = [ gensym() for i in range(nvalues) ]\n stmts = []\n if is_sequence_node(node.value) and len(node.value.elts) == nvalues:\n # Special case: RHS is sequence literal of correct length.\n for i in range(nvalues):\n temp_target = to_name(temps[i], ast.Store())\n stmts.append(ast.Assign([temp_target], node.value.elts[i]))\n else:\n # General case.\n temp_target = to_tuple(\n (to_name(temp, ast.Store()) for temp in temps), ast.Store())\n stmts.append(ast.Assign([temp_target], node.value))\n\n # Rewrite assignments as sequence of assignments.\n for target in reversed(node.targets):\n if is_sequence_node(target):\n stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))\n for i in range(nvalues))\n else:\n temp_tuple = to_tuple(to_name(temp) for temp in temps)\n stmts.append(ast.Assign([target], temp_tuple))\n \n return stmts", "def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? 
execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}", "def assign(self, name, values):\n self._assignments[name] = values", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def convert_assignments(self, exprs):\n boolean = self.model.get_units_by_name('cellml:boolean')\n for expr in exprs:\n if isinstance(expr, mathml_apply):\n# print 'Converting? 
assignment', element_xpath(expr)\n if self.special_conversions:\n self.try_convert(self._check_special_conversion, expr)\n self.try_convert(expr._set_in_units, boolean)", "def parassign(self, p):\n out = []\n out.extend(self.assign([p[0], p[2]]))\n out.extend(self.assign([p[1], p[3]]))\n return out", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def tuples(self):\n self[:] = map(tuple, self)", "def test_swap_assignment():\n x,y = 5,10\n yield (x,y)\n x,y = y,x # no ref-counting here\n yield (x,y)", "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def transferResidueAssignments(residueA,residueB):\n \n resonancesA = getResidueResonances(residueA)\n\n for resonance in resonancesA:\n assignResonanceResidue(resonance, residueB)", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)", "def 
single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def assign_from_values(var_names_to_values):\n feed_dict = {}\n assign_ops = []\n\n for var_name in var_names_to_values:\n var_value = var_names_to_values[var_name]\n var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)\n if not var:\n raise ValueError('Variable %s wasn\\'t found' % var_name)\n elif len(var) > 1:\n # tf.get_collection is just a filter on the prefix: find the exact match:\n found = False\n for v in var:\n if v.op.name == var_name:\n var = v\n found = True\n break\n\n if not found:\n raise ValueError('Variable %s doesn\\'t uniquely identify a variable' %\n var_name)\n else:\n var = var[0]\n\n # TODO(nsilberman): ensure placeholder and assign are on the same device.\n # Assign a placeholder to the value that will be filled later.\n placeholder_name = 'placeholder/' + var.op.name\n placeholder_value = array_ops.placeholder(\n dtype=var.dtype.base_dtype,\n shape=var.get_shape(),\n name=placeholder_name)\n assign_ops.append(var.assign(placeholder_value))\n\n feed_dict[placeholder_value] = var_value.reshape(var.get_shape())\n\n assign_op = control_flow_ops.group(*assign_ops)\n return assign_op, feed_dict", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def _compat_assign_gast_4(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value)", "def assign(self, dst, req, src):\n if req == 'null':\n return\n if req in ('write', 'inplace'):\n dst[:] = src\n elif req == 'add':\n dst[:] += src", "def assign_ids(ast):\n def f_either(obj, *child_results):\n id_ = slast.SlAst.id_\n obj.id_ = id_[0]\n id_[0] += 1\n\n # def f_either(obj, *child_results):\n # _id_dict = slast.SlAst._id_dict\n # id_ = slast.SlAst.id_\n # # FIXME: Assign same id to all data predicate calls with the same root/stop-nodes\n # key = str(obj.to_sl_expr())\n # if key in _id_dict:\n # obj.id_ = _id_dict[key]\n # else:\n # obj.id_ = id_[0]\n # _id_dict[key] = id_[0]\n # id_[0] += 1\n\n astutils.fold(f_either, f_either, ast)", "def assign_scope(from_scope, to_scope):\n assigns = []\n to_vars = variables.trainable_variables(to_scope)\n from_vars = variables.trainable_variables(from_scope)\n for dst, src in zip(to_vars, from_vars):\n assigns.append(state_ops.assign(dst, src))\n return control_flow_ops.group(*assigns)", "def assign(self, other):\n\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n ops = []\n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign(other_var))\n return tf.group(*ops, name=\"assign_\"+self.name)", "def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)", "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var 
x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def get_assignment(self):\n assignment = Assignment()\n for effect in self._sub_effects:\n if not effect._negated:\n assignment.add_pair(effect.get_variable() + \"'\", effect.get_value())\n else:\n assignment.add_pair(effect.get_variable() + \"'\", ValueFactory.none())\n return assignment", "def assign(array1, array2):\n for i in range(len(array1)):\n array2[i] = array1[i]", "def promote_live_variables(paths):\n for path in paths:\n symbol_table = {} # We build a new symbol table for each path\n for block in path:\n if isinstance(block, BasicBlock):\n new_statements = []\n for statement in block.statements:\n # Replace any symbols currently in the symbol table\n statement = replace_symbols(statement, symbol_table, ctx=ast.Load)\n # Fold constants\n statement = constant_fold(statement)\n # Update symbol table if the statement is an assign\n if is_assign_to_name(statement):\n symbol_table[statement.targets[0].id] = statement.value\n new_statements.append(statement)\n block.statements = new_statements\n elif isinstance(block, Branch):\n # For branches we just promote in the condition\n block.cond = replace_symbols(block.cond, symbol_table, ctx=ast.Load)\n block.cond = constant_fold(block.cond)\n return paths", "def do_assignment(val):\n if isinstance(val, collections.Iterable):\n if len(val) == 2 and all([isinstance(i, (int, float)) for i in val]):\n val1, val2 = val\n else:\n raise TypeError(\"do_assignment:: \"\n \"Expecting an iterable with 2 numeric elements\")\n elif isinstance(val, (int, float)):\n val1 = val2 = val\n else:\n raise TypeError(\"do_assignemnt:: \"\n \"Expecting value to be a number or an iterable\")\n return (val1, val2)", "def _unify_variables(self, variables):\n variables = [self._lookup(i) if isinstance(i, str) else i\n for i in variables]\n return variables", "def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 
3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))", "def assign(self, assignee: np.ndarray):\n if isinstance(self.data, pd.DataFrame):\n self.data = pd.concat([self.data, assignee], axis=1, ignore_index=True)\n else:\n self.data = pd.DataFrame(data=assignee)", "def unfix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].unfix(var_value)\r\n\r\n return m", "def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment", "def pass_assign_for_mentor(cls):\n assignments_list = cls.list_from_sql()\n return assignments_list", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def _make_hard_copy_ops(target_vars, online_vars):\n return [(target_vars[var_name].assign(online_vars[var_name]))\n for var_name in target_vars.keys()]", "def assign(self, *args):\n return _ida_hexrays.cexpr_t_assign(self, *args)", "def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)", "def initialize_assignment(self):\n # Initialize empty frozensets for each agent\n init_assignment = frozendict({a:frozenset() for a in self.agents})\n \n # Add hard assignments\n if self.hard_assignment:\n init_dict = dict(init_assignment)\n for a, t in self.hard_assignment.items():\n init_dict[a] = init_dict[a] | t\n init_assignment = frozendict(init_dict)\n \n return init_assignment", "def visit_AugAssign(self, node):\n # FIXME: Gensym the LHS to avoid two evaluations.\n self.generic_visit(node)\n rhs = to_call(self.op_to_function(node.op),\n [set_ctx(node.target), node.value])\n return ast.Assign([node.target], rhs)", "def rhs_name_transform_inplace(self, name_map):\n\n for name in name_map:\n replacment = name_map[name]\n self.rhs = MathUtil.str_expr_replacement(name, replacment,\n self.rhs)", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def set_assignment(self, updates, original=None):\n if not original:\n original = {}\n\n self.set_type(updates, original)\n\n if not updates.get('assigned_to'):\n if 
updates.get('priority'):\n # Priority was edited - nothing to set here\n return\n else:\n updates['assigned_to'] = {}\n\n assigned_to = updates.get('assigned_to') or {}\n if (assigned_to.get('user') or assigned_to.get('contact')) and not assigned_to.get('desk'):\n raise SuperdeskApiError.badRequestError(message=\"Assignment should have a desk.\")\n\n # set the assignment information\n user = get_user()\n if original.get('assigned_to', {}).get('desk') != assigned_to.get('desk'):\n if original.get('assigned_to', {}).get('state') in \\\n [ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS, ASSIGNMENT_WORKFLOW_STATE.SUBMITTED]:\n raise SuperdeskApiError.forbiddenError(\n message=\"Assignment linked to content. Desk reassignment not allowed.\")\n\n assigned_to['assigned_date_desk'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_desk'] = user.get(config.ID_FIELD)\n\n if assigned_to.get('user') and original.get('assigned_to', {}).get('user') != assigned_to.get('user'):\n assigned_to['assigned_date_user'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_user'] = user.get(config.ID_FIELD)\n\n if not original.get(config.ID_FIELD):\n updates['original_creator'] = str(user.get(config.ID_FIELD)) if user else None\n updates['assigned_to'][\n ITEM_STATE] = get_next_assignment_status(updates, updates['assigned_to'].get(ITEM_STATE) or\n ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)\n else:\n # In case user was removed\n if not assigned_to.get('user'):\n assigned_to['user'] = None\n else:\n # Moving from submitted to assigned after user assigned after desk submission\n if original.get('assigned_to')['state'] == ASSIGNMENT_WORKFLOW_STATE.SUBMITTED:\n updates['assigned_to']['state'] = get_next_assignment_status(updates,\n ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS)\n\n updates['version_creator'] = str(user.get(config.ID_FIELD)) if user else None", "def assignment(x, values, indices, axis=0):\n x_new = copy(x)\n\n use_vectorization = hasattr(indices, \"__len__\") and len(indices) < ndim(x)\n if _is_boolean(indices):\n x_new[indices] = values\n return x_new\n zip_indices = _is_iterable(indices) and _is_iterable(indices[0])\n len_indices = len(indices) if _is_iterable(indices) else 1\n if zip_indices:\n indices = tuple(zip(*indices))\n if not use_vectorization:\n if not zip_indices:\n len_indices = len(indices) if _is_iterable(indices) else 1\n len_values = len(values) if _is_iterable(values) else 1\n if len_values > 1 and len_values != len_indices:\n raise ValueError(\"Either one value or as many values as indices\")\n x_new[indices] = values\n else:\n indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:]))\n x_new[indices] = values\n return x_new", "def assignAtomsToRes(atomSets,resonance,resonanceSet=None):\n \n atomSets = list(atomSets)\n nmrProject = resonance.nmrProject\n \n chemAtomSetRef = None\n for atomSet in atomSets:\n atom = atomSet.findFirstAtom()\n if not setAssignmentMolSystem(atom.residue, resonance=resonance):\n return\n \n if len(atomSets) > 1:\n chemAtomSet = atom.chemAtom.chemAtomSet\n if chemAtomSet and chemAtomSet.chemAtomSet:\n chemAtomSet = chemAtomSet.chemAtomSet\n \n if not chemAtomSetRef:\n chemAtomSetRef = chemAtomSet\n \n #if (not chemAtomSet) or (not chemAtomSet.isProchiral) or (chemAtomSetRef is not chemAtomSet):\n if (not chemAtomSet) or (chemAtomSetRef is not chemAtomSet):\n info = [(aS, aS.findFirstAtom().residue) for aS in atomSets]\n data = ['%d%s %s' % (r.seqCode, r.ccpCode, aS.name) for aS, r in info]\n names = 
','.join(data)\n msg = 'Resonance can only be assigned to multiple sets of atoms if '\n msg += 'the sets are prochiral. This is not true for the input %s. ' % names\n msg += 'Assignment will be made to %s only' % data[0]\n showWarning('Assignment failed', msg)\n atomSets = [atomSets[0],]\n break\n \n if resonance.isotopeCode: \n if resonance.isotopeCode != 'unknown':\n element = re.match('\\d+([A-Z]\\D*)', resonance.isotopeCode).group(1)\n if element != atom.chemAtom.elementSymbol:\n data = (resonance.isotopeCode,atom.chemAtom.elementSymbol)\n msg = 'A %s resonance cannot be assigned to %s atoms'\n showWarning('Assignment failed', msg % data)\n return\n \n if not resonanceSet:\n resonanceSet = findResonanceSet(resonance,atomSets)\n \n oldResonanceSet = resonance.resonanceSet\n if oldResonanceSet and (oldResonanceSet is not resonanceSet):\n if len(oldResonanceSet.resonances) == 1:\n oldResonanceSet.delete()\n else:\n oldAtomSets = list(oldResonanceSet.atomSets)\n oldResonances = list(oldResonanceSet.resonances) \n \n oldResonances.remove(resonance)\n for atomSet in atomSets:\n if atomSet in oldAtomSets:\n oldAtomSets.remove(atomSet)\n \n oldResonanceSet.delete() \n # Other half of a now split prochiral\n nmrProject.newResonanceSet(atomSets=oldAtomSets,\n resonances=oldResonances)\n \n if resonanceSet:\n resonances = list(resonanceSet.resonances)\n\n if not resonance in resonances:\n resonanceSet.addResonance(resonance)\n resonances.append(resonance)\n \n if len(resonances) > len(atomSets):\n residue = atomSets[0].findFirstAtom().residue\n aName = '/'.join([ass.name for ass in atomSets])\n data = (len(resonances), residue.seqCode, residue.ccpCode, aName)\n msg = 'There are more resonances (%d) than atoms sets for %d%s %s'\n showWarning('Redundant resonance', msg % data)\n \n for atomSet in resonanceSet.atomSets:\n if atomSet not in atomSets:\n resonanceSet.delete()\n resonanceSet = nmrProject.newResonanceSet(atomSets=atomSets,resonances=resonances)\n break\n \n else:\n resonanceSet = nmrProject.newResonanceSet(atomSets=atomSets,resonances=[resonance, ])\n\n initResonance(resonance)\n resonances = getBoundResonances(resonance, recalculate=True)\n \n if len(resonances) == 1:\n from ccpnmr.analysis.core.MoleculeBasic import getBoundAtoms\n \n resonance2 = resonances[0]\n if not resonance2.resonanceSet:\n\n isotope = resonance2.isotope\n if isotope:\n element = isotope.chemElement.symbol\n \n atomDict = {}\n for atomSet in atomSets:\n for atom in getBoundAtoms(atomSet.findFirstAtom()):\n if atom.chemAtom.elementSymbol == element:\n atomDict[atom] = True\n \n atomSetDict = {}\n for atom in atomDict.keys():\n atomSet2 = atom.atomSet\n if atomSet2:\n atomSetDict[atomSet2] = True\n \n atomSets2 = atomSetDict.keys()\n if len(atomSets2) == 1:\n assignAtomsToRes(atomSets2,resonances[0])\n \t \n return resonanceSet", "def _autoplace(self, nodes):\n for node in nodes:\n node.autoplace()", "def replace_vars(cls, itter_list, pattern, replace):\n\n for index, line in enumerate(itter_list):\n itter_list[index] = line.replace(pattern, replace)\n\n return itter_list", "def assign_from_values_fn(var_names_to_values):\n assign_op, feed_dict = assign_from_values(var_names_to_values)\n def callback(session):\n return session.run(assign_op, feed_dict)\n return callback", "def test_compiler_assignment(patch, compiler, lines, tree):\n patch.many(Objects, ['names', 'entity'])\n tree.assignment_fragment.service = None\n tree.assignment_fragment.mutation = None\n compiler.assignment(tree, '1')\n 
Objects.names.assert_called_with(tree.path)\n fragment = tree.assignment_fragment\n entity = get_entity(fragment.expression)\n Objects.entity.assert_called_with(entity)\n kwargs = {'name': Objects.names(), 'args': [Objects.entity()],\n 'parent': '1'}\n lines.append.assert_called_with('set', tree.line(), **kwargs)", "def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))", "def varcopy(self, vars):", "def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))", "def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))", "def bulk_assign_product_coupons(desired_assignments, bulk_assignment=None):\n bulk_assignment = bulk_assignment or BulkCouponAssignment.objects.create()\n return (\n bulk_assignment,\n ProductCouponAssignment.objects.bulk_create(\n ProductCouponAssignment(\n email=email,\n product_coupon_id=product_coupon_id,\n bulk_assignment=bulk_assignment,\n )\n for email, product_coupon_id in desired_assignments\n ),\n )", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def assignToPairs(pairs, tasks, assign_same_quantity_of_tasks=False):\n pair__agents = list()\n for pair in pairs.keys():\n pair__agents.append(Agent.get_standard_agent(list(pairs[pair]), pair))\n environment = Environment(pair__agents, tasks)\n return solveAttributesAssignmentProblem(environment, assign_same_quantity_of_tasks)", "def update_individuals(individuals, eval_results):\n for ind, res in zip(individuals, eval_results):\n ind.fitness.values = res[0]\n ind.matching_node_pairs = res[1]\n ind.gtp_precisions = res[2]", "def fix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].fix(var_value)\r\n\r\n return m", "def expandInitialAssignments(*args):\n return _libsbml.SBMLTransforms_expandInitialAssignments(*args)", "def assign(values, s, d):\n other_values = values[s].replace(d, '')\n if all(eliminate(values, s, d2) for d2 in other_values):\n return values\n else:\n return False", "def assign(values, s, d):\n other_values = values[s].replace(d, '')\n if all(eliminate(values, s, d2) for d2 in other_values):\n return values\n else:\n return False", "def assign(values, s, d):\n other_values = values[s].replace(d, '')\n if all(eliminate(values, s, d2) for d2 in other_values):\n return values\n else:\n return False", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n 
raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def solve(grid):\n assignment = grid_values(grid)\n assignment = eliminate(assignment)\n return assignment", "def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def test_remove_assignment_rule(self):\n pass", "def assign_when(lhs, rhs, conditions):\n for nd_index in np.ndindex(lhs.shape):\n if conditions[nd_index]:\n lhs[nd_index] = rhs[nd_index]", "def removeAssignmentNotifiers():\n\n assignmentNotifiers(Implementation.unregisterNotify)", "def visit_any_assign(self, node: types.AnyAssign) -> None:\n self._check_slots(node)\n self.generic_visit(node)", "def _unify_exprs(self, exprs):\n if isinstance(exprs, (str, unicode)):\n # We are only being given a single string expression.\n exprs = self.exprs[exprs]\n elif isinstance(exprs, theano.tensor.basic.TensorVariable):\n # TODO: does this work in case of the GPU?\n exprs = exprs\n else:\n # We have several, either string or variable, thus make it a list\n # and substitute the strings.\n exprs = list(exprs)\n exprs = [self.exprs[i] if isinstance(i, str) else i for i in exprs]\n\n return exprs", "def invert_assignment(self, idx2_wxs, idx2_maws, *other_idx2_prop):\n # Invert mapping -- Group by word indexes\n idx2_nAssign = [len(wxs) for wxs in idx2_wxs]\n jagged_idxs = [[idx] * num for idx, num in enumerate(idx2_nAssign)]\n wx_keys, groupxs = vt.jagged_group(idx2_wxs)\n idxs_list = vt.apply_jagged_grouping(jagged_idxs, groupxs)\n wx2_idxs = dict(zip(wx_keys, idxs_list))\n maws_list = vt.apply_jagged_grouping(idx2_maws, groupxs)\n wx2_maws = dict(zip(wx_keys, maws_list))\n\n other_wx2_prop = []\n for idx2_prop in other_idx2_prop:\n # Props are assumed to be non-jagged, so make them jagged\n jagged_prop = [[prop] * num for prop, num in zip(idx2_prop, idx2_nAssign)]\n prop_list = vt.apply_jagged_grouping(jagged_prop, groupxs)\n wx2_prop = dict(zip(wx_keys, prop_list))\n other_wx2_prop.append(wx2_prop)\n if ut.VERBOSE:\n print('[smk_index.assign] L___ End Assign vecs to words.')\n assignment = (wx2_idxs, wx2_maws) + tuple(other_wx2_prop)\n return assignment", "def sequence_unpack():\n my_list = [1, 2, 3, 4, 5]\n a, b, c, d, e = my_list\n print(e)\n a, b = \"HI\"\n print(b)", "def assign(self, *args):\n return _ida_hexrays.cif_t_assign(self, *args)", "def parameter_assignments(self, parameter_assignments):\n\n self._parameter_assignments = parameter_assignments", "def multiplyAssignmentsToSIdByFunction(self, *args):\n return _libsbml.EventAssignment_multiplyAssignmentsToSIdByFunction(self, *args)", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # 
skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def assign(self, *args):\n return _ida_hexrays.cloop_t_assign(self, *args)", "def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def assign(ary, out):\n\n from . import _bh\n\n if not np.isscalar(ary):\n (ary, out) = broadcast_arrays(ary, out)[0]\n # We ignore self assignments\n if _bh.same_view(ary, out):\n return\n\n # Assigning empty arrays doesn't do anything\n if hasattr(ary, \"size\"):\n if ary.size == 0:\n return\n if hasattr(out, \"size\"):\n if out.size == 0:\n return\n\n # We use a tmp array if the in-/out-put has memory conflicts\n if overlap_conflict(out, ary):\n tmp = array_create.empty_like(out)\n assign(ary, tmp)\n return assign(tmp, out)\n\n if bhary.check(out):\n _bh.ufunc(UFUNCS[\"identity\"].info['id'], (out, ary))\n else:\n if bhary.check(ary):\n if \"BH_SYNC_WARN\" in os.environ:\n import warnings\n warnings.warn(\"BH_SYNC_WARN: Copying the array to NumPy\", RuntimeWarning, stacklevel=2)\n ary = ary.copy2numpy()\n out[...] 
= ary", "def _unpack_fix(self, lhs, rhs):\r\n\r\n ln = lhs.line\r\n co = lhs.index\r\n name = \"$js$unpack$%d$%d_\" % (ln, co)\r\n\r\n ident = Token(Token.T_TEXT, ln, co, name)\r\n comma = Token(Token.T_COMMA, ln, co, \",\")\r\n\r\n comma.children.append(\r\n self._h_assign(ident, rhs)\r\n )\r\n\r\n do_replace = False\r\n #to_define = []\r\n stack = [(ident, lhs.clone())]\r\n\r\n # TODO: prune children of comma that are not used once the stack is empty\r\n while stack:\r\n # ident is a token representing the T_TEXT variable name\r\n # tok is the left hand side expression\r\n # which may be a sequence or object to unpack ident into\r\n # together, they assume an expression of the form:\r\n # `${tok} = ${ident}`\r\n # the expression will be re-written to support this minify\r\n ident, tok = stack.pop(0)\r\n\r\n if tok.type in (Token.T_UNPACK_SEQUENCE, Token.T_LIST):\r\n if tok.type == Token.T_LIST:\r\n sys.stderr.write(\"warning: line: %d column: %d: found LIST expected T_UNPACK_SEQUENCE\" %(\r\n tok.line, tok.column))\r\n\r\n # when unpacking sequences pull out objects and assign those\r\n # indexes\r\n nested = self._unpack_sequence_scan(ident.value, tok.children)\r\n\r\n if tok.children:\r\n node = self._h_assign(tok, ident)\r\n comma.children.append(node)\r\n\r\n for idx, placeholder, elem in nested:\r\n stack.append((placeholder, elem))\r\n\r\n # FIXME: should never be T_OBJECT\r\n elif tok.type in (Token.T_UNPACK_OBJECT, Token.T_OBJECT):\r\n if tok.type == Token.T_OBJECT:\r\n sys.stderr.write(\"warning: line: %d column: %d: found OBJECT expected UNPACK_OBJECT\" %(\r\n tok.line, tok.column))\r\n\r\n nested, extra = self._unpack_object_scan(ident.value, tok.children)\r\n if tok.children:\r\n node = self._h_assign(tok, ident)\r\n comma.children.append(node)\r\n\r\n for idx, child in enumerate(node.children[0].children):\r\n if child.type == Token.T_TEXT:\r\n tmp_a = Token(Token.T_ATTR, ln, co, child.value)\r\n tmp_b = Token(Token.T_TEXT, ln, co, child.value)\r\n node.children[0].children[idx] = Token(Token.T_BINARY, ln, co, \":\", [tmp_a, tmp_b])\r\n elif child.type == Token.T_BINARY and child.value == \":\":\r\n pass\r\n else:\r\n raise TransformError(child, \"invalid token in object destructuring\")\r\n for src, dst, child in nested:\r\n\r\n attr, obj = child.children\r\n lhs = Token(Token.T_TEXT, ln, co, dst)\r\n rhs = self._h_get_attr(Token(Token.T_TEXT, ln, co, src), attr.value)\r\n comma.children.append(self._h_assign(lhs, rhs))\r\n\r\n stack.append((lhs, obj))\r\n\r\n for child in extra:\r\n\r\n lhs, rhs = child.children\r\n comma.children.append(self._h_get_attr_undefined(ident, lhs.value, rhs))\r\n do_replace = True\r\n\r\n if do_replace:\r\n return comma\r\n\r\n return None", "def assign(self, *args):\n return _libsbml.string_assign(self, *args)", "def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)", "def assign_params(sess, params, network):\n for idx, param in enumerate(params):\n assign_op = network.all_params[idx].assign(param)\n sess.run(assign_op)", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def do_subs(self, e):\n for expr, var in self.items():\n e = e.xreplace({var: expr})\n return e", "def assign_value(values, box, value):\n\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return 
values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values" ]
[ "0.6289772", "0.6041632", "0.5921341", "0.5901611", "0.57016975", "0.5555649", "0.55510634", "0.5515211", "0.5515172", "0.55127996", "0.54904634", "0.5386734", "0.5356579", "0.5338033", "0.53110933", "0.528611", "0.52802086", "0.5280196", "0.5250439", "0.52424175", "0.51393026", "0.51119703", "0.5082667", "0.507607", "0.50642174", "0.5058741", "0.50538284", "0.5050953", "0.5042711", "0.5025477", "0.5018196", "0.5018076", "0.5015217", "0.50124913", "0.5007621", "0.50036937", "0.50027853", "0.50007945", "0.50007945", "0.50007945", "0.50007945", "0.50007945", "0.49973962", "0.49902314", "0.49878195", "0.4974616", "0.49709556", "0.49685183", "0.496844", "0.49655804", "0.4962419", "0.49519858", "0.4950232", "0.4940512", "0.49384552", "0.49348405", "0.4916957", "0.49084935", "0.49061385", "0.4903207", "0.48995143", "0.48981717", "0.489626", "0.4847895", "0.48449987", "0.4837877", "0.4835083", "0.4835083", "0.4835083", "0.48347062", "0.48330027", "0.48224533", "0.4820245", "0.480456", "0.48032466", "0.48021334", "0.47956622", "0.4790942", "0.47790813", "0.47745642", "0.47558004", "0.47530508", "0.47463846", "0.47330624", "0.4729386", "0.47286808", "0.47259092", "0.47179383", "0.4706249", "0.470393", "0.4702949", "0.46972835", "0.46966037", "0.46858585", "0.46844307", "0.46844307", "0.46844307", "0.46844307", "0.46844307", "0.46844307" ]
0.55901885
5
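To make the behavior of this row's positive document concrete, here is a minimal runnable sketch of the same desugaring: a chained assignment such as x = y = value is rewritten into single assignments through one temporary. The gensym, to_name, and is_sequence_node helpers are assumed by the original code but not defined in this row, so the versions below are hypothetical stand-ins, and only the simple (non-sequence) case is reproduced.

import ast
import itertools

_counter = itertools.count()

def gensym():
    # Hypothetical stand-in: return a fresh temporary variable name on each call.
    return "__tmp{}".format(next(_counter))

def to_name(name, ctx=None):
    # Hypothetical stand-in: build an ast.Name with the requested context.
    return ast.Name(id=name, ctx=ctx if ctx is not None else ast.Load())

def is_sequence_node(node):
    # Hypothetical stand-in: true for tuple/list targets such as `a, b = ...`.
    return isinstance(node, (ast.Tuple, ast.List))

class SingleAssignments(ast.NodeTransformer):
    # Rewrites `x = y = value` into single assignments through one temporary,
    # mirroring the visit_Assign / visit_simple_assign pair in this row.
    def visit_Assign(self, node):
        self.generic_visit(node)
        if len(node.targets) > 1 and not any(map(is_sequence_node, node.targets)):
            temp = gensym()
            stmts = [ast.Assign([to_name(temp, ast.Store())], node.value)]
            stmts += [ast.Assign([target], to_name(temp)) for target in node.targets]
            return stmts
        return node

tree = ast.parse("x = y = compute()")
tree = ast.fix_missing_locations(SingleAssignments().visit(tree))
print(ast.unparse(tree))  # ast.unparse requires Python 3.9+
# Prints (temporary name may vary):
# __tmp0 = compute()
# x = __tmp0
# y = __tmp0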
Visit assignment node whose targets are all simple.
def visit_simple_assign(self, node):
    temp = gensym()
    temp_target = to_name(temp, ast.Store())
    stmts = [ ast.Assign([temp_target], node.value) ]
    stmts += [ ast.Assign([target], to_name(temp))
               for target in node.targets ]
    return stmts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def visit_any_assign(self, node: types.AnyAssign) -> None:\n self._check_slots(node)\n self.generic_visit(node)", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def visit_Assign(self, 
node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:\n for target in node.targets if isinstance(node, ast.Assign) else [node.target]:\n dottedname = node2dottedname(target) \n yield dottedname", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with different arities, e.g.,\n # `x,y = a,b,c = ...` is not a syntax error in Python, though it\n # probably should be because it's guaranteed to cause a runtime\n # error. Raise the error here, since we cannot proceed.\n raise SyntaxError(\"Multiple assignment with different arities\")\n nvalues = nvalues.pop()\n\n # Assign temporary variables.\n temps = [ gensym() for i in range(nvalues) ]\n stmts = []\n if is_sequence_node(node.value) and len(node.value.elts) == nvalues:\n # Special case: RHS is sequence literal of correct length.\n for i in range(nvalues):\n temp_target = to_name(temps[i], ast.Store())\n stmts.append(ast.Assign([temp_target], node.value.elts[i]))\n else:\n # General case.\n temp_target = to_tuple(\n (to_name(temp, ast.Store()) for temp in temps), ast.Store())\n stmts.append(ast.Assign([temp_target], node.value))\n\n # Rewrite assignments as sequence of assignments.\n for target in reversed(node.targets):\n if is_sequence_node(target):\n stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))\n for i in range(nvalues))\n else:\n temp_tuple = to_tuple(to_name(temp) for temp in temps)\n stmts.append(ast.Assign([target], temp_tuple))\n \n return stmts", "def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)", "def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:\n lhs = node.target\n rhs = self.eval_expr(node.value)\n ann_var = 
self.visit_tvm_annotation(node.annotation)\n if not isinstance(ann_var, Var):\n self.report_error(node.annotation, \"Annotation should be Var\")\n self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)\n frame = T.LetStmt(rhs, var=ann_var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def _scan_declarative_assignment_stmt(\n cls: ClassDef,\n api: SemanticAnalyzerPluginInterface,\n stmt: AssignmentStmt,\n attributes: List[util.SQLAlchemyAttribute],\n) -> None:\n lvalue = stmt.lvalues[0]\n if not isinstance(lvalue, NameExpr):\n return\n\n sym = cls.info.names.get(lvalue.name)\n\n # this establishes that semantic analysis has taken place, which\n # means the nodes are populated and we are called from an appropriate\n # hook.\n assert sym is not None\n node = sym.node\n\n if isinstance(node, PlaceholderNode):\n return\n\n assert node is lvalue.node\n assert isinstance(node, Var)\n\n if node.name == \"__abstract__\":\n if api.parse_bool(stmt.rvalue) is True:\n util.set_is_base(cls.info)\n return\n elif node.name == \"__tablename__\":\n util.set_has_table(cls.info)\n elif node.name.startswith(\"__\"):\n return\n elif node.name == \"_mypy_mapped_attrs\":\n if not isinstance(stmt.rvalue, ListExpr):\n util.fail(api, \"_mypy_mapped_attrs is expected to be a list\", stmt)\n else:\n for item in stmt.rvalue.items:\n if isinstance(item, (NameExpr, StrExpr)):\n apply.apply_mypy_mapped_attr(cls, api, item, attributes)\n\n left_hand_mapped_type: Optional[Type] = None\n left_hand_explicit_type: Optional[ProperType] = None\n\n if node.is_inferred or node.type is None:\n if isinstance(stmt.type, UnboundType):\n # look for an explicit Mapped[] type annotation on the left\n # side with nothing on the right\n\n # print(stmt.type)\n # Mapped?[Optional?[A?]]\n\n left_hand_explicit_type = stmt.type\n\n if stmt.type.name == \"Mapped\":\n mapped_sym = api.lookup_qualified(\"Mapped\", cls)\n if (\n mapped_sym is not None\n and mapped_sym.node is not None\n and names.type_id_for_named_node(mapped_sym.node)\n is names.MAPPED\n ):\n left_hand_explicit_type = get_proper_type(\n stmt.type.args[0]\n )\n left_hand_mapped_type = stmt.type\n\n # TODO: do we need to convert from unbound for this case?\n # left_hand_explicit_type = util._unbound_to_instance(\n # api, left_hand_explicit_type\n # )\n else:\n node_type = get_proper_type(node.type)\n if (\n isinstance(node_type, Instance)\n and names.type_id_for_named_node(node_type.type) is 
names.MAPPED\n ):\n # print(node.type)\n # sqlalchemy.orm.attributes.Mapped[<python type>]\n left_hand_explicit_type = get_proper_type(node_type.args[0])\n left_hand_mapped_type = node_type\n else:\n # print(node.type)\n # <python type>\n left_hand_explicit_type = node_type\n left_hand_mapped_type = None\n\n if isinstance(stmt.rvalue, TempNode) and left_hand_mapped_type is not None:\n # annotation without assignment and Mapped is present\n # as type annotation\n # equivalent to using _infer_type_from_left_hand_type_only.\n\n python_type_for_type = left_hand_explicit_type\n elif isinstance(stmt.rvalue, CallExpr) and isinstance(\n stmt.rvalue.callee, RefExpr\n ):\n python_type_for_type = infer.infer_type_from_right_hand_nameexpr(\n api, stmt, node, left_hand_explicit_type, stmt.rvalue.callee\n )\n\n if python_type_for_type is None:\n return\n\n else:\n return\n\n assert python_type_for_type is not None\n\n attributes.append(\n util.SQLAlchemyAttribute(\n name=node.name,\n line=stmt.line,\n column=stmt.column,\n typ=python_type_for_type,\n info=cls.info,\n )\n )\n\n apply.apply_type_to_mapped_statement(\n api,\n stmt,\n lvalue,\n left_hand_explicit_type,\n python_type_for_type,\n )", "def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)", "def _(self, node: Assignment):\n\n # This check allows us to ignore the initialization nodes\n # in the CAST 'i.e. x0 = -1'\n if node.source_refs == None:\n if type(node.left) == Var:\n if type(node.right) == Number and node.right.number == -1:\n return \"\"\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n to_ret = f\"( assign {left} {right} )\"\n return to_ret", "def process_assignment_ast(stmt_ast: ast.Assign, stmt_ast_parent_block):\n logger.log.info(\"Generating SymbolicState instance from assignment ast\")\n # first, add a reference from stmt_ast to its parent block\n stmt_ast.parent_block = stmt_ast_parent_block\n logger.log.info(\"Instantiating symbolic state for AST instance stmt_ast = %s\" % stmt_ast)\n # determine the program variables assigned on the left-hand-side\n targets: list = stmt_ast.targets\n # extract names - for now just care about normal program variables, not attributes or functions\n logger.log.info(\"Extracting list of assignment target names\")\n target_names: list = []\n for target in targets:\n target_names += extract_symbol_names_from_target(target)\n logger.log.info(\"List of all program variables changed is %s\" % target_names)\n # extract function names\n assigned_value = stmt_ast.value\n function_names: list = extract_function_names(assigned_value)\n logger.log.info(\"List of all program functions called is %s\" % function_names)\n # merge the two lists of symbols\n logger.log.info(\"Merging lists of assignment target names and function names\")\n all_symbols: list = target_names + function_names\n logger.log.info(\"List of all symbols to mark as changed in the symbolic state is %s\" % all_symbols)\n # set up a SymbolicState instance\n logger.log.info(\"Instantiating new StatementSymbolicState instance with all_symbols = %s\" % all_symbols)\n symbolic_state: SymbolicState = StatementSymbolicState(all_symbols, stmt_ast)\n return symbolic_state", "def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n 
#print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts", "def visit_Node(self, node):\n pass", "def visit(self, node):", "def visit(self, node):", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def 
test_list_assignments_for_tree(self):\n # Enable OS-INHERIT extension\n\n test_plan = {\n # Create a domain with a project hierarchy 3 levels deep:\n #\n # project 0\n # ____________|____________\n # | |\n # project 1 project 4\n # ______|_____ ______|_____\n # | | | |\n # project 2 project 3 project 5 project 6\n #\n # Also, create 1 user and 4 roles.\n 'entities': {\n 'domains': {\n 'projects': {'project': [{'project': 2},\n {'project': 2}]},\n 'users': 1},\n 'roles': 4},\n 'assignments': [\n # Direct assignment to projects 1 and 2\n {'user': 0, 'role': 0, 'project': 1},\n {'user': 0, 'role': 1, 'project': 2},\n # Also an inherited assignment on project 1\n {'user': 0, 'role': 2, 'project': 1,\n 'inherited_to_projects': True},\n # ...and two spoiler assignments, one to the root and one\n # to project 4\n {'user': 0, 'role': 0, 'project': 0},\n {'user': 0, 'role': 3, 'project': 4}],\n 'tests': [\n # List all assignments for project 1 and its subtree.\n {'params': {'project': 1, 'include_subtree': True},\n 'results': [\n # Only the actual assignments should be returned, no\n # expansion of inherited assignments\n {'user': 0, 'role': 0, 'project': 1},\n {'user': 0, 'role': 1, 'project': 2},\n {'user': 0, 'role': 2, 'project': 1,\n 'inherited_to_projects': 'projects'}]}\n ]\n }\n\n self.execute_assignment_plan(test_plan)", "def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)", "def visit_Assign(self, node: Assign) -> None:\n\n node_type = type(node.right).__name__\n if isinstance(node.right, String):\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n c_str = self.builder.alloca(instruct.type)\n self.builder.store(instruct, c_str)\n self.builder.ret_void()\n else:\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n self.builder.ret(instruct)\n\n self.GLOBAL_MEMORY[node.left.value] = instruct", "def visit_Assign(self, node):\n var_name = node.left.value\n self.VARIABLES[var_name] = self.visit(node.right)", "def generic_visit(self, node: ast.AST) -> None:", "def assign_simple_node_features(ndata, g, ntype, assign_id=False):\n for col in g.nodes[ntype].data.keys():\n if not assign_id and col == dgl.NID:\n continue\n induced_nodes = ndata[dgl.NID]\n ndata[col] = g.nodes[ntype].data[col][induced_nodes]", "def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment", "def test_obj_action_for_assignments():\n grammar = r\"\"\"\n S: a=\"foo\" b?=\"bar\" c=C+;\n C: val=\"baz\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = Parser(g)\n\n result = p.parse(\"foo bar baz baz baz\")\n\n assert isinstance(result, g.classes['S'])\n assert isinstance(result.c[0], g.classes['C'])\n\n assert result.a == \"foo\"\n assert result.b is True\n assert len(result.c) == 3\n assert all((c.val == \"baz\" for c in result.c))", "def test_multi_source_explicit(self):\n with Graph('g') as graph:\n graph.source | Node('a') | graph.sink\n graph.source * 'out2' | Node('b') | 'in2' * graph.sink", "def 
default_visit(self, node):\n pass", "def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"assign: \", tok )\n\tif re.match( Lexer.identifier, tok ):\n\t\tident = VarRef( tok )\n\telse: \n\t\terror( \"Invalid identifier\" )\n\ttok = tokens.next( )\n\tequals = match( \"=\" )\n\ttok = tokens.peek( )\n\texpr = expression( )\n\tmatch( \";\" )\n\tequals = VarRef( equals )\n\tstatement = assign( equals, ident, expr )\n\treturn statement", "def dfs_visit(self, node):\n super(MutantGenerator, self).generic_visit(node)", "def assert_assignment(text, operator, left, right):\n try:\n node = parse_single_statement(text)\n eq_(node.op, operator)\n eq_(node.target.name, left)\n eq_( node.right.value, right)\n except AssertionError as e:\n node.show()\n raise e", "def Assignment(self):\n id = self.primary()\n if self.currtok[1].name == \"DECLERATION\":\n self.currtok = next(self.tg)\n if self.functions.get(self.currtok[0]) is not None:\n\n express = self.FunctionCall()\n return assignmentStmt(id, express)\n else:\n express = self.Expression()\n\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return assignmentStmt(id, express)\n raise SLUCSyntaxError(\"ERROR: Missing Semicolon on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing assignment on line {0}\".format(str(self.currtok[2] - 1)))", "def test_list_effective_assignments_for_tree_with_mixed_assignments(self):\n test_plan = {\n # Create a domain with a project hierarchy 3 levels deep:\n #\n # project 0\n # ____________|____________\n # | |\n # project 1 project 4\n # ______|_____ ______|_____\n # | | | |\n # project 2 project 3 project 5 project 6\n #\n # Also, create 2 users, 1 group and 4 roles.\n 'entities': {\n 'domains': {\n 'projects': {'project': [{'project': 2},\n {'project': 2}]},\n 'users': 2, 'groups': 1},\n 'roles': 4},\n # Both users are part of the same group\n 'group_memberships': [{'group': 0, 'users': [0, 1]}],\n # We are going to ask for listing of assignment on project 1 and\n # it's subtree. 
So first we'll add two inherited assignments above\n # this (one user and one for a group that contains this user).\n 'assignments': [{'user': 0, 'role': 0, 'project': 0,\n 'inherited_to_projects': True},\n {'group': 0, 'role': 1, 'project': 0,\n 'inherited_to_projects': True},\n # Now an inherited assignment on project 1 itself,\n # which should ONLY show up on its children\n {'user': 0, 'role': 2, 'project': 1,\n 'inherited_to_projects': True},\n # ...and a direct assignment on one of those\n # children\n {'user': 0, 'role': 3, 'project': 2},\n # The rest are spoiler assignments\n {'user': 0, 'role': 2, 'project': 5},\n {'user': 0, 'role': 3, 'project': 4}],\n 'tests': [\n # List all effective assignments for project 1 and its subtree.\n {'params': {'project': 1, 'user': 0, 'effective': True,\n 'include_subtree': True},\n 'results': [\n # First, we should see the inherited user assignment from\n # project 0 on all projects in the subtree\n {'user': 0, 'role': 0, 'project': 1,\n 'indirect': {'project': 0}},\n {'user': 0, 'role': 0, 'project': 2,\n 'indirect': {'project': 0}},\n {'user': 0, 'role': 0, 'project': 3,\n 'indirect': {'project': 0}},\n # Also the inherited group assignment from project 0 on\n # the subtree\n {'user': 0, 'role': 1, 'project': 1,\n 'indirect': {'project': 0, 'group': 0}},\n {'user': 0, 'role': 1, 'project': 2,\n 'indirect': {'project': 0, 'group': 0}},\n {'user': 0, 'role': 1, 'project': 3,\n 'indirect': {'project': 0, 'group': 0}},\n # The inherited assignment on project 1 should appear only\n # on its children\n {'user': 0, 'role': 2, 'project': 2,\n 'indirect': {'project': 1}},\n {'user': 0, 'role': 2, 'project': 3,\n 'indirect': {'project': 1}},\n # And finally the direct assignment on project 2\n {'user': 0, 'role': 3, 'project': 2}]}\n ]\n }\n\n self.execute_assignment_plan(test_plan)", "def switch(self, tree, i, j):\n self.first_target = min(i, j)\n self.second_target = max(i, j)\n\n self.original_ast = tree\n\n return self.visit(tree)", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast", "def generic_visit(self, node):\n\t\tfor attr, vs in ast.iter_fields(node):\n\t\t\tif isinstance(vs, ast.AST):\n\t\t\t\tself.graph[self.make_label(node)].append(self.make_label(vs))\n\t\t\t\tself.visit(vs)\n\t\t\telif isinstance(vs, list):\n\t\t\t\tfor v in vs:\n\t\t\t\t\tif isinstance(v, ast.AST):\n\t\t\t\t\t\tself.graph[self.make_label(node)].append(self.make_label(v))\n\t\t\t\t\t\tself.visit(v)\n\t\t\telse:\n\t\t\t\tpass", "def mutate_single_node(self, node, operator):\n if node.__class__ is operator[0] or (operator[1] is StatementDeletion and node.__class__ is ast.Pass):\n mutated_node = operator[1].mutate(node)\n node = mutated_node\n\n return node", "def get_assignment_literal_value(self):\n if not self.is_single_assign:\n raise ValueError(\n \"Statement is not an assignment to a single name: %s\" % self)\n n 
= self.ast_node\n target_name = n.targets[0].id\n literal_value = ast.literal_eval(n.value)\n return (target_name, literal_value)", "def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)", "def test_RestrictingNodeTransformer__visit_Is__1():\n assert restricted_eval('None is None') is True", "def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def _compat_assign_gast_4(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value)", "def visitNode(node,doublevars,doublevars_modified):\n children = []\n doublevars_predefined = set()\n if hasattr(node, \"content\"):\n children = node.content\n elif hasattr(node, \"items\"):\n children = node.items\n elif type(node) in (tuple, list):\n children = node\n for child in children:\n if(type(child)==fparser.one.statements.Assignment):\n lhs = cleanVariableName(child.variable)\n # Visit an assignment statement, e.g. \"a = b + c\"\n if(lhs in doublevars):\n doublevars_modified.add(lhs)\n rhs = child.expr\n readDoubleVars = set(filter(lambda x: x in rhs, doublevars))\n doublevars_predefined = doublevars_predefined.union(readDoubleVars.difference(doublevars_modified))\n else:\n newmodified, newpredefined = visitNode(child, doublevars, doublevars_modified)\n doublevars_modified = doublevars_modified.union(newmodified)\n doublevars_predefined = doublevars_predefined.union(newpredefined)\n return doublevars_modified, doublevars_predefined", "def expandInitialAssignments(*args):\n return _libsbml.SBMLTransforms_expandInitialAssignments(*args)", "def test_list_effective_assignments_for_tree(self):\n test_plan = {\n # Create a domain with a project hierarchy 3 levels deep:\n #\n # project 0\n # ____________|____________\n # | |\n # project 1 project 4\n # ______|_____ ______|_____\n # | | | |\n # project 2 project 3 project 5 project 6\n #\n # Also, create 1 user and 4 roles.\n 'entities': {\n 'domains': {\n 'projects': {'project': [{'project': 2},\n {'project': 2}]},\n 'users': 1},\n 'roles': 4},\n 'assignments': [\n # An inherited assignment on project 1\n {'user': 0, 'role': 1, 'project': 1,\n 'inherited_to_projects': True},\n # A direct assignment to project 2\n {'user': 0, 'role': 2, 'project': 2},\n # ...and two spoiler assignments, one to the root and one\n # to project 4\n {'user': 0, 'role': 0, 'project': 0},\n {'user': 0, 'role': 3, 'project': 4}],\n 'tests': [\n # List all effective assignments for project 1 and its subtree.\n {'params': {'project': 1, 'effective': True,\n 'include_subtree': True},\n 'results': [\n # The inherited assignment on project 1 should appear only\n # on its children\n {'user': 0, 'role': 1, 'project': 2,\n 'indirect': {'project': 1}},\n {'user': 0, 'role': 1, 'project': 3,\n 'indirect': {'project': 1}},\n # And finally the direct assignment on project 2\n {'user': 0, 'role': 2, 'project': 2}]}\n ]\n }\n\n self.execute_assignment_plan(test_plan)", "def visit_AugAssign(self, node):\n target = node.target\n\n 
rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def _parse_initial_assignments(self, model, comp, node):\n node = dom_child(node, 'initialAssignment')\n while node:\n var = str(node.getAttribute('symbol')).strip()\n var = self._convert_name(var)\n if var in comp:\n self.log('Parsing initial assignment for \"' + var + '\".')\n var = comp[var]\n expr = parse_mathml_rhs(dom_child(node, 'math'), comp, self)\n if var.is_state():\n # Initial value\n var.set_state_value(expr, default=True)\n else:\n # Change of value\n var.set_rhs(expr)\n else:\n raise SBMLError('Initial assignment found for unknown'\n ' parameter <' + var + '>.')\n node = dom_next(node, 'initialAssignment')", "def test_list_effective_assignments_for_tree_with_domain_assignments(self):\n test_plan = {\n # Create a domain with a project hierarchy 3 levels deep:\n #\n # project 0\n # ____________|____________\n # | |\n # project 1 project 4\n # ______|_____ ______|_____\n # | | | |\n # project 2 project 3 project 5 project 6\n #\n # Also, create 1 user and 4 roles.\n 'entities': {\n 'domains': {\n 'projects': {'project': [{'project': 2},\n {'project': 2}]},\n 'users': 1},\n 'roles': 4},\n 'assignments': [\n # An inherited assignment on the domain (which should be\n # applied to all the projects)\n {'user': 0, 'role': 1, 'domain': 0,\n 'inherited_to_projects': True},\n # A direct assignment to project 2\n {'user': 0, 'role': 2, 'project': 2},\n # ...and two spoiler assignments, one to the root and one\n # to project 4\n {'user': 0, 'role': 0, 'project': 0},\n {'user': 0, 'role': 3, 'project': 4}],\n 'tests': [\n # List all effective assignments for project 1 and its subtree.\n {'params': {'project': 1, 'effective': True,\n 'include_subtree': True},\n 'results': [\n # The inherited assignment from the domain should appear\n # only on the part of the subtree we are interested in\n {'user': 0, 'role': 1, 'project': 1,\n 'indirect': {'domain': 0}},\n {'user': 0, 'role': 1, 'project': 2,\n 'indirect': {'domain': 0}},\n {'user': 0, 'role': 1, 'project': 3,\n 'indirect': {'domain': 0}},\n # And finally the direct assignment on project 2\n {'user': 0, 'role': 2, 'project': 2}]}\n ]\n }\n\n self.execute_assignment_plan(test_plan)", "def node_assignment(edge_index: nb.int64[:,:],\n edge_label: nb.int64[:],\n n: nb.int64) -> nb.int64[:]:\n # Loop over on edges, reset the group IDs of connected node\n on_edges = edge_index[np.where(edge_label)[0]]\n return union_find(on_edges, n)[0]", "def test_remove_assignment_rule(self):\n pass", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def test_compiler_assignment(patch, compiler, lines, tree):\n patch.many(Objects, ['names', 'entity'])\n tree.assignment_fragment.service = None\n tree.assignment_fragment.mutation = None\n compiler.assignment(tree, '1')\n Objects.names.assert_called_with(tree.path)\n fragment = tree.assignment_fragment\n entity = get_entity(fragment.expression)\n Objects.entity.assert_called_with(entity)\n kwargs = {'name': Objects.names(), 'args': [Objects.entity()],\n 'parent': '1'}\n lines.append.assert_called_with('set', tree.line(), 
**kwargs)", "def _compat_assign_gast_5(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value, type_comment=type_comment)", "def buildProcessesOutOfAssignments(self):\n assigments = where(self.startsOfDataPaths,\n lambda x: isinstance(x, Assignment)\n )\n for sig, dps in groupedby(assigments, lambda x: x.dst):\n dps = list(dps)\n name = \"\"\n if not sig.hasGenericName:\n name = sig.name\n sig.hidden = False\n \n # render sequential statements in process\n # (conversion from netlist to statements)\n for stm in renderIfTree(dps):\n p = HWProcess(\"assig_process_\" + name)\n if sig._useNopVal and not isEnclosed(stm):\n n = sig._nopVal\n p.statements.append(Assignment(n, sig))\n if isinstance(n, RtlSignal):\n p.sensitivityList.add(n)\n \n p.statements.append(stm)\n sensitivity = discoverSensitivity(stm)\n p.sensitivityList.update(sensitivity)\n for s in p.sensitivityList:\n s.hidden = False\n\n yield p", "def SBMLTransforms_expandInitialAssignments(*args):\n return _libsbml.SBMLTransforms_expandInitialAssignments(*args)", "def random_assignment(graph, possibilities):\n for node in graph.nodes.values():\n node.set_value(random.choice(possibilities))", "def visit_assignname( # pylint: disable=too-many-branches\n self, node: nodes.AssignName\n ) -> None:\n frame = node.frame(future=True)\n assign_type = node.assign_type()\n\n # Check names defined in comprehensions\n if isinstance(assign_type, nodes.Comprehension):\n self._check_name(\"inlinevar\", node.name, node)\n\n # Check names defined in module scope\n elif isinstance(frame, nodes.Module):\n # Check names defined in Assign nodes\n if isinstance(assign_type, nodes.Assign):\n inferred_assign_type = utils.safe_infer(assign_type.value)\n\n # Check TypeVar's and TypeAliases assigned alone or in tuple assignment\n if isinstance(node.parent, nodes.Assign):\n if self._assigns_typevar(assign_type.value):\n self._check_name(\"typevar\", assign_type.targets[0].name, node)\n return\n if self._assigns_typealias(assign_type.value):\n self._check_name(\"typealias\", assign_type.targets[0].name, node)\n return\n\n if (\n isinstance(node.parent, nodes.Tuple)\n and isinstance(assign_type.value, nodes.Tuple)\n # protect against unbalanced tuple unpacking\n and node.parent.elts.index(node) < len(assign_type.value.elts)\n ):\n assigner = assign_type.value.elts[node.parent.elts.index(node)]\n if self._assigns_typevar(assigner):\n self._check_name(\n \"typevar\",\n assign_type.targets[0]\n .elts[node.parent.elts.index(node)]\n .name,\n node,\n )\n return\n if self._assigns_typealias(assigner):\n self._check_name(\n \"typealias\",\n assign_type.targets[0]\n .elts[node.parent.elts.index(node)]\n .name,\n node,\n )\n return\n\n # Check classes (TypeVar's are classes so they need to be excluded first)\n elif isinstance(inferred_assign_type, nodes.ClassDef):\n self._check_name(\"class\", node.name, node)\n\n # Don't emit if the name redefines an import in an ImportError except handler.\n elif not _redefines_import(node) and isinstance(\n inferred_assign_type, nodes.Const\n ):\n self._check_name(\"const\", node.name, node)\n else:\n self._check_name(\n \"variable\", node.name, node, disallowed_check_only=True\n )\n\n # Check names defined in AnnAssign nodes\n elif isinstance(assign_type, nodes.AnnAssign):\n if utils.is_assign_name_annotated_with(node, \"Final\"):\n self._check_name(\"const\", node.name, node)\n elif self._assigns_typealias(assign_type.annotation):\n self._check_name(\"typealias\", node.name, node)\n\n # Check names defined in 
function scopes\n elif isinstance(frame, nodes.FunctionDef):\n # global introduced variable aren't in the function locals\n if node.name in frame and node.name not in frame.argnames():\n if not _redefines_import(node):\n self._check_name(\"variable\", node.name, node)\n\n # Check names defined in class scopes\n elif isinstance(frame, nodes.ClassDef):\n if not list(frame.local_attr_ancestors(node.name)):\n for ancestor in frame.ancestors():\n if utils.is_enum(ancestor) or utils.is_assign_name_annotated_with(\n node, \"Final\"\n ):\n self._check_name(\"class_const\", node.name, node)\n break\n else:\n self._check_name(\"class_attribute\", node.name, node)", "def execute_assignment_cases(self, test_plan, test_data):\n def check_results(expected, actual, param_arg_count):\n if param_arg_count == 0:\n # It was an unfiltered call, so default fixture assignments\n # might be polluting our answer - so we take into account\n # how many assignments there were before the test.\n self.assertEqual(\n len(expected) + test_data['initial_assignment_count'],\n len(actual))\n else:\n self.assertThat(actual, matchers.HasLength(len(expected)))\n\n for each_expected in expected:\n expected_assignment = {}\n for param in each_expected:\n if param == 'inherited_to_projects':\n expected_assignment[param] = each_expected[param]\n elif param == 'indirect':\n # We're expecting the result to contain an indirect\n # dict with the details how the role came to be placed\n # on this entity - so convert the key/value pairs of\n # that dict into real entity references.\n indirect_term = {}\n for indirect_param in each_expected[param]:\n key, value = self._convert_entity_shorthand(\n indirect_param, each_expected[param],\n test_data)\n indirect_term[key] = value\n expected_assignment[param] = indirect_term\n else:\n # Convert a simple shorthand entry into a full\n # entity reference\n key, value = self._convert_entity_shorthand(\n param, each_expected, test_data)\n expected_assignment[key] = value\n self.assertIn(expected_assignment, actual)\n\n def convert_group_ids_sourced_from_list(index_list, reference_data):\n value_list = []\n for group_index in index_list:\n value_list.append(\n reference_data['groups'][group_index]['id'])\n return value_list\n\n # Go through each test in the array, processing the input params, which\n # we build into an args dict, and then call list_role_assignments. 
Then\n # check the results against those specified in the test plan.\n for test in test_plan.get('tests', []):\n args = {}\n for param in test['params']:\n if param in ['effective', 'inherited', 'include_subtree']:\n # Just pass the value into the args\n args[param] = test['params'][param]\n elif param == 'source_from_group_ids':\n # Convert the list of indexes into a list of IDs\n args[param] = convert_group_ids_sourced_from_list(\n test['params']['source_from_group_ids'], test_data)\n else:\n # Turn 'entity : 0' into 'entity_id = ac6736ba873d'\n # where entity in user, group, project or domain\n key, value = self._convert_entity_shorthand(\n param, test['params'], test_data)\n args[key] = value\n results = self.assignment_api.list_role_assignments(**args)\n check_results(test['results'], results, len(args))", "def convert_assign_value(g, op, block):\n\n keys = [\"bool_values\", \"fp32_values\", \"int32_values\", \"int64_values\"]\n dtypes = [\"bool\", \"float32\", \"int32\", \"int64\"]\n for i, key in enumerate(keys):\n dtype = dtypes[i]\n value = np.array(op.attr(key)).astype(dtype)\n if value is not None and value.size >= 1:\n break\n shape = op.attr(\"shape\")\n value = value.reshape(shape)\n out = _op.const(value, dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)", "def visit_node(self, node: OnnxNode, network: Network):\n pass", "def test_simple_dag(self, test_taskgroup_dag, simple_dag_expected_edges):\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n op1 >> Label(\"Label\") >> group >> op4\n compare_dag_edges(dag_edges(dag), simple_dag_expected_edges)", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. 
MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def get_assignments(function_node, object_name):\n\n # This only supports simple assignments such as \"name.attr = value\" or \"name[index] = value\". Other\n # assignments will either throw an exception or not return the correct thing.\n # This code could be modified to allow for more robust statements but I kept it simple because the\n # code should already be formatted for these conditions.\n assignments = []\n for node in ast.walk(function_node):\n if isinstance(node, ast.Assign):\n assign = node\n if isinstance(assign.targets[0], ast.Subscript):\n subscript = assign.targets[0]\n if isinstance(subscript.value, ast.Name):\n name = subscript.value.id # This is the ast.Name related to the object_name\n if name == object_name:\n subscript_value = convert_literal_node(subscript.slice.value)\n value = convert_literal_node(assign.value)\n new_assignment = SubscriptAssignment(object_name=object_name, subscript=subscript_value, value=value)\n assignments.append(new_assignment)\n elif isinstance(assign.targets[0], ast.Attribute):\n attribute = assign.targets[0]\n if isinstance(attribute.value, ast.Name):\n name = attribute.value.id # This is the ast.Name related to the object_name\n if name == object_name:\n attribute_name = attribute.attr\n attribute_value = convert_literal_node(assign.value)\n new_assignment = AttributeAssignment(object_name=object_name, attribute=attribute_name, value=attribute_value)\n assignments.append(new_assignment)\n return assignments", "def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 
3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))", "def generic_visit(self, node: ast.AST) -> None:\n for v in iter_values(node):\n self.visit(v)", "def group_assignments(assignment_list):\n return group_nodes(assignment_list, 1)", "def setup_ant(self):\n self.visited_nodes[1:] = []\n self.actual_node = self.start_pos", "def verify_assign(self, d_stmt, table):\n lvalue = DanaExpr.factory(d_stmt.find_first_child(\"p_lvalue\"), table)\n expr = DanaExpr.factory(d_stmt.find_first_child(\"p_expr\"), table)\n self.exprs = [lvalue, expr]\n\n expr.type.check_type(d_stmt.linespan, lvalue.type)\n expr.type.in_types(d_stmt.linespan, [DanaType(\"int\"), DanaType(\"byte\")])", "def visit_AttributeDeclaration(self, node):\n default = node.default\n if default is not None:\n self.visit(node.default)", "def identity(self):\r\n self.piDD = {\"[1]\": None}\r\n self.top_node = \"[1]\"\r\n self.dim = 0", "def test_statement_initialized_by_assignment():\n shap = Statement(shape_id=\"@photo\", prop_id=\"dcterms:creator\", value_type=\"URI\")\n shap2 = Statement()\n shap2.shape_id = \"@photo\"\n shap2.prop_id = \"dcterms:creator\"\n shap2.value_type = \"URI\"\n assert shap == shap2", "def default_visit(self, node):\n raise ValueError('Unhandled Node %s.' % node)", "def default_visit(self, node):\n raise ValueError('Unhandled Node %s.' % node)", "def generic_visit(self, node):\n for field in node._fields:\n try:\n value = getattr(node, field)\n except AttributeError:\n continue\n if isinstance(value, list):\n for item in value:\n if isinstance(item, ast.AST):\n self.visit(item)\n elif isinstance(value, ast.AST):\n self.visit(value)", "def test_RestrictingNodeTransformer__visit_Eq__1():\n assert restricted_eval('1 == int(\"1\")') is True", "def expandInitialAssignments(self):\n return _libsbml.SBMLDocument_expandInitialAssignments(self)", "def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 
3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))", "def test_multi_sink_explicit(self):\n with Graph('g') as graph:\n pike.glob('a', '*') | graph.sink\n pike.glob('b', '*') | 'in2' * graph.sink", "def visit_Assign(self, node):\n var_name = node.left.token.value\n self.GLOBAL_SCOPE[var_name] = self.visit(node.right)", "def __init__(self, target, stack, values, p):\n\n self.stack = stack\n \n for value in values:\n # Feed the extension result to the target:\n extended = Assignment(p=p, value=value) | target\n # Stack ``extended``:\n self.stack ^ extended", "def test_role_assignments_simple_tree_of_implied_roles_on_domain(self):\n test_plan = {\n 'entities': {'domains': {'users': 1},\n 'roles': 4},\n # Three level tree of implied roles\n 'implied_roles': [{'role': 0, 'implied_roles': 1},\n {'role': 1, 'implied_roles': [2, 3]}],\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0}],\n 'tests': [\n # List all direct assignments for user[0], this should just\n # show the one top level role assignment\n {'params': {'user': 0},\n 'results': [{'user': 0, 'role': 0, 'domain': 0}]},\n # Listing in effective mode should how the implied roles\n # expanded out\n {'params': {'user': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 0, 'role': 1, 'domain': 0,\n 'indirect': {'role': 0}},\n {'user': 0, 'role': 2, 'domain': 0,\n 'indirect': {'role': 1}},\n {'user': 0, 'role': 3, 'domain': 0,\n 'indirect': {'role': 1}}]},\n ]\n }\n self.execute_assignment_plan(test_plan)", "def test_parser_run_generates_correct_ast_variable_statements(self):\n tokens = [\n Token(TokenType.VARIABLE, 'Variable identifier'),\n Token(TokenType.IDENTIFIER, 'everything'),\n Token(TokenType.EQUAL, '='),\n Token(TokenType.INTEGER, 42),\n Token(TokenType.END_STATEMENT, 'End of Statement'),\n Token(TokenType.EOF, 'End of File')\n ]\n\n parser_response = Parser(tokens).parse()\n first_statement_ast = parser_response.ast.statements[0]\n\n assert len(parser_response.errors()) == 0\n\n assert type(first_statement_ast) is VariableDeclaration\n assert first_statement_ast.name == 'everything'\n\n assert type(first_statement_ast.initializer) is Integer\n assert first_statement_ast.initializer.value == 42", "def testFirstSet(self):\n\n self.node.desc = 'first description'\n\n self.assertEqual(\n ['first description', ],\n self.node.desc\n )" ]
[ "0.69212425", "0.64886594", "0.6375846", "0.6128011", "0.5978111", "0.5925981", "0.59075874", "0.58308166", "0.57873815", "0.572006", "0.5688921", "0.5654621", "0.5651131", "0.5596672", "0.5524254", "0.5473678", "0.5473099", "0.53852975", "0.53435594", "0.529892", "0.52969265", "0.5280814", "0.5269391", "0.5228468", "0.51454127", "0.51370144", "0.51370144", "0.5118967", "0.5118967", "0.51078135", "0.5100158", "0.5094874", "0.5035666", "0.50299376", "0.5020061", "0.50148827", "0.50147724", "0.5014655", "0.50087315", "0.49883127", "0.49730283", "0.49383217", "0.4934487", "0.4930285", "0.49256536", "0.4917248", "0.4871925", "0.48489216", "0.48435387", "0.48382992", "0.4836799", "0.4822837", "0.47931197", "0.47911665", "0.47885087", "0.4776693", "0.47611243", "0.4750141", "0.47446352", "0.47413853", "0.47167167", "0.4707111", "0.47021896", "0.46869642", "0.46754912", "0.4673063", "0.46702027", "0.4665884", "0.46115422", "0.46029294", "0.45937625", "0.4586357", "0.45834672", "0.45777547", "0.45620385", "0.4559551", "0.45541123", "0.45370513", "0.4510567", "0.44877815", "0.44814828", "0.44724563", "0.44622576", "0.44563457", "0.4454637", "0.4448313", "0.4433559", "0.44234088", "0.44202214", "0.44202214", "0.44127983", "0.44027075", "0.44013232", "0.43967736", "0.43929487", "0.43749502", "0.43619275", "0.4358946", "0.43557397", "0.43550685" ]
0.70178634
0
Visit assignment node with at least one compound target.
def visit_compound_assign(self, node): # Determine number of values (arity) of compound assignment. nvalues = { len(target.elts) for target in node.targets if is_sequence_node(target) } if len(nvalues) > 1: # A multiple, compound assignment with different arities, e.g., # `x,y = a,b,c = ...` is not a syntax error in Python, though it # probably should be because it's guaranteed to cause a runtime # error. Raise the error here, since we cannot proceed. raise SyntaxError("Multiple assignment with different arities") nvalues = nvalues.pop() # Assign temporary variables. temps = [ gensym() for i in range(nvalues) ] stmts = [] if is_sequence_node(node.value) and len(node.value.elts) == nvalues: # Special case: RHS is sequence literal of correct length. for i in range(nvalues): temp_target = to_name(temps[i], ast.Store()) stmts.append(ast.Assign([temp_target], node.value.elts[i])) else: # General case. temp_target = to_tuple( (to_name(temp, ast.Store()) for temp in temps), ast.Store()) stmts.append(ast.Assign([temp_target], node.value)) # Rewrite assignments as sequence of assignments. for target in reversed(node.targets): if is_sequence_node(target): stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i])) for i in range(nvalues)) else: temp_tuple = to_tuple(to_name(temp) for temp in temps) stmts.append(ast.Assign([target], temp_tuple)) return stmts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def visit_any_assign(self, node: types.AnyAssign) -> None:\n self._check_slots(node)\n self.generic_visit(node)", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def _Assign(self, t):\n if 
len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private 
attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts", "def iterassign(node:_AssingT) -> 
Iterator[Optional[List[str]]]:\n for target in node.targets if isinstance(node, ast.Assign) else [node.target]:\n dottedname = node2dottedname(target) \n yield dottedname", "def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)", "def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)", "def process_assignment_ast(stmt_ast: ast.Assign, stmt_ast_parent_block):\n logger.log.info(\"Generating SymbolicState instance from assignment ast\")\n # first, add a reference from stmt_ast to its parent block\n stmt_ast.parent_block = stmt_ast_parent_block\n logger.log.info(\"Instantiating symbolic state for AST instance stmt_ast = %s\" % stmt_ast)\n # determine the program variables assigned on the left-hand-side\n targets: list = stmt_ast.targets\n # extract names - for now just care about normal program variables, not attributes or functions\n logger.log.info(\"Extracting list of assignment target names\")\n target_names: list = []\n for target in targets:\n target_names += extract_symbol_names_from_target(target)\n logger.log.info(\"List of all program variables changed is %s\" % target_names)\n # extract function names\n assigned_value = stmt_ast.value\n function_names: list = extract_function_names(assigned_value)\n logger.log.info(\"List of all program functions called is %s\" % function_names)\n # merge the two lists of symbols\n logger.log.info(\"Merging lists of assignment target names and function names\")\n all_symbols: list = target_names + function_names\n logger.log.info(\"List of all symbols to mark as changed in the symbolic state is %s\" % all_symbols)\n # set up a SymbolicState instance\n logger.log.info(\"Instantiating new StatementSymbolicState instance with all_symbols = %s\" % all_symbols)\n symbolic_state: SymbolicState = StatementSymbolicState(all_symbols, stmt_ast)\n return symbolic_state", "def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:\n lhs = node.target\n rhs = self.eval_expr(node.value)\n ann_var = self.visit_tvm_annotation(node.annotation)\n if not isinstance(ann_var, Var):\n self.report_error(node.annotation, \"Annotation should be Var\")\n self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)\n frame = T.LetStmt(rhs, var=ann_var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()", "def _scan_declarative_assignment_stmt(\n cls: ClassDef,\n api: SemanticAnalyzerPluginInterface,\n stmt: AssignmentStmt,\n attributes: List[util.SQLAlchemyAttribute],\n) -> None:\n lvalue = stmt.lvalues[0]\n if not isinstance(lvalue, NameExpr):\n return\n\n sym = cls.info.names.get(lvalue.name)\n\n # this establishes that semantic analysis has taken place, which\n # means the nodes are populated and we are called from an appropriate\n # hook.\n assert sym is not None\n node = sym.node\n\n if isinstance(node, PlaceholderNode):\n return\n\n assert node is lvalue.node\n assert isinstance(node, Var)\n\n if node.name == \"__abstract__\":\n if api.parse_bool(stmt.rvalue) is True:\n util.set_is_base(cls.info)\n return\n elif node.name == \"__tablename__\":\n util.set_has_table(cls.info)\n elif node.name.startswith(\"__\"):\n return\n elif node.name == \"_mypy_mapped_attrs\":\n if not 
isinstance(stmt.rvalue, ListExpr):\n util.fail(api, \"_mypy_mapped_attrs is expected to be a list\", stmt)\n else:\n for item in stmt.rvalue.items:\n if isinstance(item, (NameExpr, StrExpr)):\n apply.apply_mypy_mapped_attr(cls, api, item, attributes)\n\n left_hand_mapped_type: Optional[Type] = None\n left_hand_explicit_type: Optional[ProperType] = None\n\n if node.is_inferred or node.type is None:\n if isinstance(stmt.type, UnboundType):\n # look for an explicit Mapped[] type annotation on the left\n # side with nothing on the right\n\n # print(stmt.type)\n # Mapped?[Optional?[A?]]\n\n left_hand_explicit_type = stmt.type\n\n if stmt.type.name == \"Mapped\":\n mapped_sym = api.lookup_qualified(\"Mapped\", cls)\n if (\n mapped_sym is not None\n and mapped_sym.node is not None\n and names.type_id_for_named_node(mapped_sym.node)\n is names.MAPPED\n ):\n left_hand_explicit_type = get_proper_type(\n stmt.type.args[0]\n )\n left_hand_mapped_type = stmt.type\n\n # TODO: do we need to convert from unbound for this case?\n # left_hand_explicit_type = util._unbound_to_instance(\n # api, left_hand_explicit_type\n # )\n else:\n node_type = get_proper_type(node.type)\n if (\n isinstance(node_type, Instance)\n and names.type_id_for_named_node(node_type.type) is names.MAPPED\n ):\n # print(node.type)\n # sqlalchemy.orm.attributes.Mapped[<python type>]\n left_hand_explicit_type = get_proper_type(node_type.args[0])\n left_hand_mapped_type = node_type\n else:\n # print(node.type)\n # <python type>\n left_hand_explicit_type = node_type\n left_hand_mapped_type = None\n\n if isinstance(stmt.rvalue, TempNode) and left_hand_mapped_type is not None:\n # annotation without assignment and Mapped is present\n # as type annotation\n # equivalent to using _infer_type_from_left_hand_type_only.\n\n python_type_for_type = left_hand_explicit_type\n elif isinstance(stmt.rvalue, CallExpr) and isinstance(\n stmt.rvalue.callee, RefExpr\n ):\n python_type_for_type = infer.infer_type_from_right_hand_nameexpr(\n api, stmt, node, left_hand_explicit_type, stmt.rvalue.callee\n )\n\n if python_type_for_type is None:\n return\n\n else:\n return\n\n assert python_type_for_type is not None\n\n attributes.append(\n util.SQLAlchemyAttribute(\n name=node.name,\n line=stmt.line,\n column=stmt.column,\n typ=python_type_for_type,\n info=cls.info,\n )\n )\n\n apply.apply_type_to_mapped_statement(\n api,\n stmt,\n lvalue,\n left_hand_explicit_type,\n python_type_for_type,\n )", "def test_obj_action_for_assignments():\n grammar = r\"\"\"\n S: a=\"foo\" b?=\"bar\" c=C+;\n C: val=\"baz\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = Parser(g)\n\n result = p.parse(\"foo bar baz baz baz\")\n\n assert isinstance(result, g.classes['S'])\n assert isinstance(result.c[0], g.classes['C'])\n\n assert result.a == \"foo\"\n assert result.b is True\n assert len(result.c) == 3\n assert all((c.val == \"baz\" for c in result.c))", "def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, 
rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def Assignment(self):\n id = self.primary()\n if self.currtok[1].name == \"DECLERATION\":\n self.currtok = next(self.tg)\n if self.functions.get(self.currtok[0]) is not None:\n\n express = self.FunctionCall()\n return assignmentStmt(id, express)\n else:\n express = self.Expression()\n\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return assignmentStmt(id, express)\n raise SLUCSyntaxError(\"ERROR: Missing Semicolon on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing assignment on line {0}\".format(str(self.currtok[2] - 1)))", "def get_assignment_literal_value(self):\n if not self.is_single_assign:\n raise ValueError(\n \"Statement is not an assignment to a single name: %s\" % self)\n n = self.ast_node\n target_name = n.targets[0].id\n literal_value = ast.literal_eval(n.value)\n return (target_name, literal_value)", "def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast", "def _(self, node: Assignment):\n\n # This check allows us to ignore the initialization nodes\n # in the CAST 'i.e. 
x0 = -1'\n if node.source_refs == None:\n if type(node.left) == Var:\n if type(node.right) == Number and node.right.number == -1:\n return \"\"\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n to_ret = f\"( assign {left} {right} )\"\n return to_ret", "def __init__(self, target, stack, values, p):\n\n self.stack = stack\n \n for value in values:\n # Feed the extension result to the target:\n extended = Assignment(p=p, value=value) | target\n # Stack ``extended``:\n self.stack ^ extended", "def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)", "def _compat_assign_gast_4(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value)", "def visit_Compound(self, node):\n for statement in node.statements:\n self.visit(statement)", "def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)", "def mutate_single_node(self, node, operator):\n if node.__class__ is operator[0] or (operator[1] is StatementDeletion and node.__class__ is ast.Pass):\n mutated_node = operator[1].mutate(node)\n node = mutated_node\n\n return node", "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def convert_assign_value(g, op, block):\n\n keys = [\"bool_values\", \"fp32_values\", \"int32_values\", \"int64_values\"]\n dtypes = [\"bool\", \"float32\", \"int32\", \"int64\"]\n for i, key in enumerate(keys):\n dtype = dtypes[i]\n value = np.array(op.attr(key)).astype(dtype)\n if value is not None and value.size >= 1:\n break\n shape = op.attr(\"shape\")\n value = value.reshape(shape)\n out = _op.const(value, dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def visit_Assign(self, node: Assign) -> None:\n\n node_type = type(node.right).__name__\n if isinstance(node.right, String):\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n c_str = self.builder.alloca(instruct.type)\n self.builder.store(instruct, c_str)\n self.builder.ret_void()\n else:\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n self.builder.ret(instruct)\n\n self.GLOBAL_MEMORY[node.left.value] = instruct", "def visit_assignname( # pylint: disable=too-many-branches\n self, node: nodes.AssignName\n ) -> None:\n frame = node.frame(future=True)\n assign_type = node.assign_type()\n\n # Check names defined in comprehensions\n if isinstance(assign_type, nodes.Comprehension):\n self._check_name(\"inlinevar\", node.name, node)\n\n # Check names defined in module scope\n elif isinstance(frame, nodes.Module):\n # Check names defined in Assign nodes\n if isinstance(assign_type, nodes.Assign):\n inferred_assign_type = utils.safe_infer(assign_type.value)\n\n # Check TypeVar's and TypeAliases assigned alone or in tuple assignment\n if isinstance(node.parent, nodes.Assign):\n if self._assigns_typevar(assign_type.value):\n self._check_name(\"typevar\", 
assign_type.targets[0].name, node)\n return\n if self._assigns_typealias(assign_type.value):\n self._check_name(\"typealias\", assign_type.targets[0].name, node)\n return\n\n if (\n isinstance(node.parent, nodes.Tuple)\n and isinstance(assign_type.value, nodes.Tuple)\n # protect against unbalanced tuple unpacking\n and node.parent.elts.index(node) < len(assign_type.value.elts)\n ):\n assigner = assign_type.value.elts[node.parent.elts.index(node)]\n if self._assigns_typevar(assigner):\n self._check_name(\n \"typevar\",\n assign_type.targets[0]\n .elts[node.parent.elts.index(node)]\n .name,\n node,\n )\n return\n if self._assigns_typealias(assigner):\n self._check_name(\n \"typealias\",\n assign_type.targets[0]\n .elts[node.parent.elts.index(node)]\n .name,\n node,\n )\n return\n\n # Check classes (TypeVar's are classes so they need to be excluded first)\n elif isinstance(inferred_assign_type, nodes.ClassDef):\n self._check_name(\"class\", node.name, node)\n\n # Don't emit if the name redefines an import in an ImportError except handler.\n elif not _redefines_import(node) and isinstance(\n inferred_assign_type, nodes.Const\n ):\n self._check_name(\"const\", node.name, node)\n else:\n self._check_name(\n \"variable\", node.name, node, disallowed_check_only=True\n )\n\n # Check names defined in AnnAssign nodes\n elif isinstance(assign_type, nodes.AnnAssign):\n if utils.is_assign_name_annotated_with(node, \"Final\"):\n self._check_name(\"const\", node.name, node)\n elif self._assigns_typealias(assign_type.annotation):\n self._check_name(\"typealias\", node.name, node)\n\n # Check names defined in function scopes\n elif isinstance(frame, nodes.FunctionDef):\n # global introduced variable aren't in the function locals\n if node.name in frame and node.name not in frame.argnames():\n if not _redefines_import(node):\n self._check_name(\"variable\", node.name, node)\n\n # Check names defined in class scopes\n elif isinstance(frame, nodes.ClassDef):\n if not list(frame.local_attr_ancestors(node.name)):\n for ancestor in frame.ancestors():\n if utils.is_enum(ancestor) or utils.is_assign_name_annotated_with(\n node, \"Final\"\n ):\n self._check_name(\"class_const\", node.name, node)\n break\n else:\n self._check_name(\"class_attribute\", node.name, node)", "def visit_Assign(self, node):\n var_name = node.left.value\n self.VARIABLES[var_name] = self.visit(node.right)", "def can_merge(self, target):\n if self.parentnode_id != target.parentnode_id:\n raise ValidationError(\n gettext_lazy('Cannot merge self into target, self and target is not part of same AssignmentGroup')\n )", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. 
For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment", "def visitNode(node,doublevars,doublevars_modified):\n children = []\n doublevars_predefined = set()\n if hasattr(node, \"content\"):\n children = node.content\n elif hasattr(node, \"items\"):\n children = node.items\n elif type(node) in (tuple, list):\n children = node\n for child in children:\n if(type(child)==fparser.one.statements.Assignment):\n lhs = cleanVariableName(child.variable)\n # Visit an assignment statement, e.g. 
\"a = b + c\"\n if(lhs in doublevars):\n doublevars_modified.add(lhs)\n rhs = child.expr\n readDoubleVars = set(filter(lambda x: x in rhs, doublevars))\n doublevars_predefined = doublevars_predefined.union(readDoubleVars.difference(doublevars_modified))\n else:\n newmodified, newpredefined = visitNode(child, doublevars, doublevars_modified)\n doublevars_modified = doublevars_modified.union(newmodified)\n doublevars_predefined = doublevars_predefined.union(newpredefined)\n return doublevars_modified, doublevars_predefined", "def verify_assign(self, d_stmt, table):\n lvalue = DanaExpr.factory(d_stmt.find_first_child(\"p_lvalue\"), table)\n expr = DanaExpr.factory(d_stmt.find_first_child(\"p_expr\"), table)\n self.exprs = [lvalue, expr]\n\n expr.type.check_type(d_stmt.linespan, lvalue.type)\n expr.type.in_types(d_stmt.linespan, [DanaType(\"int\"), DanaType(\"byte\")])", "def process_call_byref_assign(topconstruct):\n for topcalls in query([is_layering([syntax.CALL, syntax.ASSIGNMENT, syntax.PROGRAM])], TreeItem(topconstruct)):\n assignment = topcalls.parent_item\n #c = topcalls.construct\n # -- check the args of this call: do them contain a reference\n # we need to find all the\n refs = query([is_layering([syntax.REFERENCE, syntax.CALL, syntax.ASSIGNMENT, syntax.PROGRAM])], topcalls)\n if len(refs) > 0:\n var_names = list(map(lambda r: r.construct.args[0].args[0], refs))\n var_names.insert(0, assignment.construct.args[0])\n res_tuple = syntax.Construct(syntax.PY_TUPLE, var_names)\n # here we need to create a tuple\n assignment.construct.args[0] = res_tuple", "def set_rhs(self):\n pass", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def assert_assignment(text, operator, left, right):\n try:\n node = parse_single_statement(text)\n eq_(node.op, operator)\n eq_(node.target.name, left)\n eq_( node.right.value, right)\n except AssertionError as e:\n node.show()\n raise e", "def visit_AugAssign(self, node):\n # FIXME: Gensym the LHS to avoid two evaluations.\n self.generic_visit(node)\n rhs = to_call(self.op_to_function(node.op),\n [set_ctx(node.target), node.value])\n return ast.Assign([node.target], rhs)", "def test_multi_source_explicit(self):\n with Graph('g') as graph:\n graph.source | Node('a') | graph.sink\n graph.source * 'out2' | Node('b') | 'in2' * graph.sink", "def _compat_assign_gast_5(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value, type_comment=type_comment)", "def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"assign: \", tok )\n\tif re.match( Lexer.identifier, tok ):\n\t\tident = VarRef( tok )\n\telse: \n\t\terror( \"Invalid identifier\" )\n\ttok = tokens.next( )\n\tequals = match( \"=\" )\n\ttok = tokens.peek( )\n\texpr = expression( )\n\tmatch( \";\" )\n\tequals = VarRef( equals )\n\tstatement = assign( equals, ident, expr )\n\treturn statement", "def get_assignment(self):\n assignment = Assignment()\n for effect in self._sub_effects:\n if not effect._negated:\n assignment.add_pair(effect.get_variable() + 
\"'\", effect.get_value())\n else:\n assignment.add_pair(effect.get_variable() + \"'\", ValueFactory.none())\n return assignment", "def _parse_initial_assignments(self, model, comp, node):\n node = dom_child(node, 'initialAssignment')\n while node:\n var = str(node.getAttribute('symbol')).strip()\n var = self._convert_name(var)\n if var in comp:\n self.log('Parsing initial assignment for \"' + var + '\".')\n var = comp[var]\n expr = parse_mathml_rhs(dom_child(node, 'math'), comp, self)\n if var.is_state():\n # Initial value\n var.set_state_value(expr, default=True)\n else:\n # Change of value\n var.set_rhs(expr)\n else:\n raise SBMLError('Initial assignment found for unknown'\n ' parameter <' + var + '>.')\n node = dom_next(node, 'initialAssignment')", "def satisfying_assignment(formula):\n # convert the formula to a list of sets.\n formula = [set(i) for i in formula]\n\n # call the helper starting with the givne formula and an empty assignments\n # dictionary.\n result = sat_helper(formula, {})\n if result[0]:\n return result[1] # result[1] will be the dictionary of assignments.\n else:\n return None", "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def update_graph_compound_costs(self):\n\n # # # Check if all costs are available\n if not self.compound_costs_solved:\n unsolved_cmp = [key for key, _ in self.compound_costs.items()]\n raise RuntimeError(\"The following cmp have no cost assigned:\\n\" + str(unsolved_cmp) +\n \"\\nReconsider the starting conditions.\")\n # # # Reset unique_iterator_list as graph changes\n self._reset_iterator_memory()\n for node in self.compound_costs.keys():\n # # # Loop over all edges of compound and manipulate weight\n for target_node, attributes in self.graph_handler.graph[node].items():\n required_compound_costs = np.asarray([self.compound_costs[k] for k in attributes['required_compounds']])\n tot_required_compound_costs = np.sum(required_compound_costs)\n # # # Set required compound costs in edge\n self.graph_handler.graph.edges[node,\n target_node]['required_compound_costs'] = tot_required_compound_costs\n # # # Add required compound costs to weight\n self.graph_handler.graph.edges[node, target_node]['weight'] += tot_required_compound_costs", "def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))", "def node_assignment(edge_index: nb.int64[:,:],\n edge_label: nb.int64[:],\n n: nb.int64) -> nb.int64[:]:\n # Loop over on edges, reset the group IDs of connected node\n on_edges = edge_index[np.where(edge_label)[0]]\n return union_find(on_edges, n)[0]", "def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 
3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))", "def visit_Assign(self, node):\n var_name = node.left.token.value\n self.GLOBAL_SCOPE[var_name] = self.visit(node.right)", "def visit_Compound(self, node):\n for child in node.children:\n self.visit(child)", "def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))", "def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}", "def test_uc_to_assignment(self):\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['Unassigned'], 1.0, 1),\r\n 'q4': (['Unassigned'], 1.0, 1),\r\n 'q5': (['Unassigned'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)\r\n\r\n # change label for unassignable\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['x'], 1.0, 1),\r\n 'q4': (['x'], 1.0, 1),\r\n 'q5': (['x'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp,\r\n 'unassignable_label': 'x'}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)", "def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise NotImplementedError", "def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 
3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def assign(self, other):\n\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n ops = []\n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign(other_var))\n return tf.group(*ops, name=\"assign_\"+self.name)", "def test_statement_initialized_by_assignment():\n shap = Statement(shape_id=\"@photo\", prop_id=\"dcterms:creator\", value_type=\"URI\")\n shap2 = Statement()\n shap2.shape_id = \"@photo\"\n shap2.prop_id = \"dcterms:creator\"\n shap2.value_type = \"URI\"\n assert shap == shap2", "def __init__(self, lhs, rhs):\n assert _chktype(1, lhs, Nonterminal)\n self._lhs = lhs\n self._rhs = tuple(rhs)", "def any(self):\n return self.__node_a", "def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0", "def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)", "def test_tuple_assign(self):\r\n def local_test(x,y):\r\n m1=Module()\r\n m1.l=(x(), y())\r\n\r\n # create a Method that makes the second list element a shared Member\r\n m1.g=Method([], m1.l[0])\r\n m1.f=Method([], m1.l[1])\r\n m = m1.make()\r\n\r\n #assign 4 and 5 to the two variables' containers in m\r\n m.l = (4, 5)\r\n assert 5 == m.f()\r\n assert 4 == m.g()\r\n\r\n local_test(lambda:T.dscalar(),lambda:T.dscalar())", "def assign_ids(ast):\n def f_either(obj, *child_results):\n id_ = slast.SlAst.id_\n obj.id_ = id_[0]\n id_[0] += 1\n\n # def f_either(obj, *child_results):\n # _id_dict = slast.SlAst._id_dict\n # id_ = slast.SlAst.id_\n # # FIXME: Assign same id to all data predicate calls with the same root/stop-nodes\n # key = str(obj.to_sl_expr())\n # if key in _id_dict:\n # obj.id_ = _id_dict[key]\n # else:\n # obj.id_ = id_[0]\n # _id_dict[key] = id_[0]\n # id_[0] += 1\n\n astutils.fold(f_either, f_either, ast)", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def __init__(self, *args):\n this = _libsbml.new_AssignmentRule(*args)\n try: self.this.append(this)\n except: self.this = this", "def _assigns_typealias(node: nodes.NodeNG | None) -> bool:\n inferred = utils.safe_infer(node)\n if isinstance(inferred, nodes.ClassDef):\n if inferred.qname() == \".Union\":\n # Union is a special case because it can be used as a type alias\n # or as a type annotation. 
We only want to check the former.\n assert node is not None\n return not isinstance(node.parent, nodes.AnnAssign)\n elif isinstance(inferred, nodes.FunctionDef):\n if inferred.qname() == \"typing.TypeAlias\":\n return True\n return False", "def test_remove_assignment_rule(self):\n pass", "def assign_weights(root, value, name=None):\n assign_ops = []\n\n def assign(node):\n if isinstance(node, Weights):\n assign_ops.append(node.assign(value))\n\n with tf.name_scope(name, \"AssignWeights\", [root, value]):\n # Get all assignment operations\n traverse_graph(root, fun=assign, skip_params=False)\n\n # Return a collective operation\n return tf.group(*assign_ops)", "def test_compiler_assignment(patch, compiler, lines, tree):\n patch.many(Objects, ['names', 'entity'])\n tree.assignment_fragment.service = None\n tree.assignment_fragment.mutation = None\n compiler.assignment(tree, '1')\n Objects.names.assert_called_with(tree.path)\n fragment = tree.assignment_fragment\n entity = get_entity(fragment.expression)\n Objects.entity.assert_called_with(entity)\n kwargs = {'name': Objects.names(), 'args': [Objects.entity()],\n 'parent': '1'}\n lines.append.assert_called_with('set', tree.line(), **kwargs)", "def rhs(self):\n if not self.is_assign():\n raise AssertionError('Not an assignment')\n return self.initializer", "def assign_value(Xj, Xk, csp, assignment):\r\n parent_assignment = assignment[Xj]\r\n for val in csp.curr_domains[Xk]:\r\n if csp.constraints(Xj, parent_assignment, Xk, val):\r\n return val\r\n\r\n # No consistent assignment available\r\n return None", "def test_variable_compound_assign_boxed_return(self):\n env = self.trace('foo, bar = objects.create_foo_and_bar()')\n\n events = self.variable_events\n event = next(evt for evt in events if isinstance(evt, TraceAssign))\n self.assertEqual(event.name, ('foo','bar'))\n self.assertEqual(event.value, (env['foo'],env['bar']))\n self.assertIsInstance(event.value_event, TraceReturn)\n self.assertEqual(event.value_event.function, objects.create_foo_and_bar)\n self.assertTrue(event.value_event.multiple_values)", "def calc_operand_assignments(self, op_keys, input_chunk_metas=None):\n graph = self._graph\n op_states = self._op_states\n cur_assigns = OrderedDict(self._fixed_assigns)\n expect_workers = dict()\n\n key_to_chunks = defaultdict(list)\n for n in graph:\n key_to_chunks[n.op.key].append(n)\n if n.op.expect_worker is not None:\n expect_workers[n.op.key] = cur_assigns[n.op.key] = n.op.expect_worker\n\n descendant_readies = set()\n op_keys = set(op_keys)\n requested_chunks = [key_to_chunks[k][0] for k in op_keys]\n chunks_to_assign = [c for c in requested_chunks if c.op.expect_worker is None]\n\n if any(graph.count_predecessors(c) for c in chunks_to_assign):\n graph = graph.copy()\n for c in graph:\n if c.op.key not in op_keys:\n continue\n for pred in graph.predecessors(c):\n graph.remove_edge(pred, c)\n\n assigned_counts = defaultdict(lambda: 0)\n worker_op_keys = defaultdict(set)\n if cur_assigns:\n for op_key, state in op_states.items():\n if op_key not in op_keys and state == OperandState.READY \\\n and op_key in cur_assigns:\n descendant_readies.add(op_key)\n assigned_counts[cur_assigns[op_key]] += 1\n\n # calculate the number of nodes to be assigned to every worker\n # given number of workers and existing assignments\n pre_worker_quotas = self._calc_worker_assign_limits(\n len(chunks_to_assign) + len(descendant_readies), assigned_counts)\n\n # pre-assign nodes given pre-determined transfer sizes\n if not input_chunk_metas:\n worker_quotas = 
pre_worker_quotas\n else:\n for op_key, worker in self._iter_assignments_by_transfer_sizes(\n pre_worker_quotas, input_chunk_metas):\n if op_key in cur_assigns or op_key not in op_keys:\n continue\n assigned_counts[worker] += 1\n cur_assigns[op_key] = worker\n worker_op_keys[worker].add(op_key)\n\n worker_quotas = self._calc_worker_assign_limits(\n len(chunks_to_assign) + len(descendant_readies), assigned_counts)\n\n if cur_assigns:\n # calculate ranges of nodes already assigned\n for op_key, worker in self._iter_successor_assigns(cur_assigns):\n cur_assigns[op_key] = worker\n worker_op_keys[worker].add(op_key)\n\n logger.debug('Worker assign quotas: %r', worker_quotas)\n\n # calculate expected descendant count (spread range) of\n # every worker and subtract assigned number from it\n average_spread_range = len(graph) * 1.0 / len(self._worker_slots)\n spread_ranges = defaultdict(lambda: average_spread_range)\n for worker in cur_assigns.values():\n spread_ranges[worker] -= 1\n\n logger.debug('Scan spread ranges: %r', dict(spread_ranges))\n\n # assign pass 1: assign from fixed groups\n sorted_workers = sorted(worker_op_keys, reverse=True, key=lambda k: len(worker_op_keys[k]))\n for worker in sorted_workers:\n start_chunks = reduce(operator.add, (key_to_chunks[op_key] for op_key in worker_op_keys[worker]))\n self._assign_by_bfs(start_chunks, worker, worker_quotas, spread_ranges,\n op_keys, cur_assigns, graph=graph)\n\n # assign pass 2: assign from other nodes to be assigned\n sorted_candidates = [v for v in chunks_to_assign]\n while max(worker_quotas.values()):\n worker = max(worker_quotas, key=lambda k: worker_quotas[k])\n try:\n cur = sorted_candidates.pop()\n while cur.op.key in cur_assigns:\n cur = sorted_candidates.pop()\n except IndexError: # pragma: no cover\n break\n self._assign_by_bfs(cur, worker, worker_quotas, spread_ranges, op_keys,\n cur_assigns, graph=graph)\n\n keys_to_assign = {n.op.key: n.op for n in requested_chunks}\n assignments = {k: v for k, v in cur_assigns.items() if k in keys_to_assign}\n assignments.update({k: v for k, v in expect_workers.items() if k in keys_to_assign})\n return assignments", "def pair_node_to_var(tree, c):\n # find parent Binary operator\n while True:\n old = c\n c = next(iter(tree.predecessors(c)))\n if c.type == 'operator':\n if len(c.operands) == 2:\n break\n p, q = tree.successors(c)\n v = p if q == old else q\n # go down until terminal found\n # assuming correct syntax for gr1c\n while True:\n if not tree.succ.get(v):\n break\n v = next(iter(tree.successors(v)))\n # now: b, is the operator and: v, the variable\n return v, c", "def add_assignment_clause(self, clause):\r\n if not isinstance(clause, AssignmentClause):\r\n raise StatementException(\"only instances of AssignmentClause can be added to statements\")\r\n clause.set_context_id(self.context_counter)\r\n self.context_counter += clause.get_context_size()\r\n self.assignments.append(clause)", "def _init_targets(self):\n for ga_main, ga_targ in zip(self.ga.variables, self.ga_.variables):\n ga_targ.assign(ga_main)\n if self.use_lyapunov:\n for lc_main, lc_targ in zip(self.lc.variables, self.lc_.variables):\n lc_targ.assign(lc_main)\n else:\n for q_1_main, q_1_targ in zip(self.q_1.variables, self.q_1_.variables):\n q_1_targ.assign(q_1_main)\n for q_2_main, q_2_targ in zip(self.q_2.variables, self.q_2_.variables):\n q_2_targ.assign(q_2_main)", "def visit_Compound(self, node: Compound) -> None:\n\n for child in node.children:\n self.visit(child)", "def parse_ast_args(cls, ast_args: List) -> 
Union[tree.AstNode, List[tree.AstNode]]:\n if cls == tree.Declaration and len(ast_args) >= 3:\n # We deal with chained declarations here (`int a = b = 1;`). We want two separate variable declarations.\n if(ast_args[2] == '['):\n var_type, identifier, _, valInt, _ = ast_args\n ast_args[0] = ast_args[0] + str(tree.Identifier(valInt.value))\n ast_args = ast_args[: 2]\n else:\n print(ast_args)\n var_type, identifier, expr = ast_args\n if isinstance(expr, tree.Assignment):\n # We should raise an error somehow if there's no previous declaration of the variable here.\n # A good solution would maintain a mapping to the original source code so we can show where the error is.\n # We want to move the assignment node one up so it is **sibling** to this declaration node.\n # Then the declaration should be made with the value of the assigned variable.\n ast_args[2] = tree.Identifier(expr.identifier.name)\n return [expr, parse_ast_args(cls, ast_args)]\n\n if cls == tree.Function:\n # Sometimes we don't have function arguments. I don't know how to handle it but here, rearranging args order.\n assert len(ast_args) in {3, 4}\n if len(ast_args) == 4:\n # Swap function args and body so it works with our class' constructor default args.\n ast_args[2], ast_args[3] = ast_args[3], ast_args[2]\n\n if cls == tree.Expr and any(op in ast_args for op in tree.BinOp.OPERATORS):\n # We want to parse 4 / 3 * 2 with left-associativity. (it should output 2)\n # It means we need to parse the multiplication first\n *left_hand_side, op, right_hand_side = ast_args\n assert op in tree.BinOp.OPERATORS, \"Operator should be in second place in the token list\"\n\n if len(left_hand_side) > 1:\n # We need to parse something like 1 + 2 + 3 + 4\n left_hand_side = parse_ast_args(cls, left_hand_side)\n else:\n # The right hand side is a single expression, it was already parsed into an ast.\n left_hand_side = left_hand_side[0]\n\n return tree.BinOp(left_hand_side, op, right_hand_side)\n\n # We 'unnest' the structure - these classes are abstract so we are rly interested in what they contain.\n if cls == tree.Expr:\n assert len(ast_args) == 1\n return ast_args[0]\n if cls == tree.Statement:\n return ast_args[0] if ast_args else None\n\n # Hack. Esp since some 'class_name' refer to functions.\n if \"\\t\" in ast_args:\n ast_args.remove(\"\\t\")\n\n if cls == tree.Assignment and len(ast_args) >= 3:\n # We deal with chained declarations here (`int a = b = 1;`). We want two separate variable declarations.\n if(ast_args[1] == '['):\n identifier, _, valInt, _, expres = ast_args\n identifier.name = identifier.name + \"[\" + str(valInt.value) + \"]\"\n ast_args[0] = identifier\n ast_args[1] = expres\n ast_args = ast_args[: 2]\n\n if cls == tree.Identifier and len(ast_args) > 1:\n if (ast_args[1] == '['):\n identifier, _, valInt, _ = ast_args\n tmp = str(valInt)[:10]\n if tmp == \"Identifier\":\n identifier = identifier + \"[\" + valInt.name + \"]\"\n else:\n identifier = identifier + \"[\" + str(valInt.value) + \"]\"\n ast_args[0] = identifier\n ast_args = ast_args[: 1]\n\n return cls(*ast_args)", "def test_augassign_recursion():\n # infinitely recurses in python\n code = \"\"\"\n def rec():\n a = 0\n a += rec()\n return a\n rec()\n \"\"\"\n cls_node = extract_node(code)\n assert next(cls_node.infer()) is util.Uninferable" ]
[ "0.7261902", "0.6757224", "0.64843816", "0.63659924", "0.624103", "0.62127984", "0.6177989", "0.61282015", "0.60786444", "0.604624", "0.60448", "0.5942896", "0.59183824", "0.5750465", "0.57303625", "0.5648947", "0.55960566", "0.5571266", "0.5543112", "0.5511169", "0.5493238", "0.54690856", "0.54233736", "0.5397378", "0.537382", "0.53426856", "0.5236439", "0.5229051", "0.52111745", "0.51824206", "0.5079902", "0.50791276", "0.5077424", "0.50616944", "0.50447536", "0.50001746", "0.49659243", "0.49501982", "0.49262443", "0.49216574", "0.49170986", "0.49095604", "0.49059328", "0.48852447", "0.48602328", "0.48588762", "0.48397547", "0.48292038", "0.4825946", "0.48224682", "0.48159406", "0.4782712", "0.47812113", "0.47812113", "0.4762241", "0.47500294", "0.47329715", "0.472453", "0.4724379", "0.4698589", "0.46561518", "0.4650297", "0.4644453", "0.46421733", "0.46267986", "0.4594375", "0.4592482", "0.45786417", "0.45670465", "0.45591745", "0.45529163", "0.45429352", "0.4532701", "0.45313293", "0.45230484", "0.45223278", "0.44819808", "0.44805884", "0.4472046", "0.4471105", "0.44651118", "0.44518712", "0.44492084", "0.44458848", "0.44296232", "0.44236612", "0.4405466", "0.439008", "0.43860057", "0.43741378", "0.43688184", "0.43673506", "0.43663803", "0.4360598", "0.4357551", "0.43476838", "0.4344408", "0.43360808", "0.43342733", "0.43260112" ]
0.7176904
1
Replace multiple deletion with single deletions.
def visit_Delete(self, node):
        self.generic_visit(node)
        if len(node.targets) > 1:
            return [ast.Delete([target]) for target in node.targets]
        return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_deletions(murim_mutations):\n\n pass", "def del_primers(primers,\n deletions):\n \n # Sort primers in reverse order so indices remain correct during deletion\n deletions.sort(reverse=True)\n for n in deletions:\n del primers[n]\n return primers", "def delete_multiple(self, idxes, sort = True):\n if(sort): idxes.sort() # inplace\n # forced to loop but it's better than using np.remove which is not inplace.\n for idx in np.flip(idxes): # view = constant time \n self.arr[idx] = self.arr[self.current-1]\n self.current -= 1", "def undelete(self,\r\n undeletelist=None,\r\n update_table=True):\r\n\r\n m_temp = iter([a_temp for a_temp\r\n in range(1,len(self.indexes()*2))\r\n if str(a_temp)\r\n not in self.indexes()])\r\n\r\n\r\n # iter function is used to find free spots for the notes to be undeleted\r\n\r\n if undeletelist is None:\r\n undeletelist = [Index(x_temp)\r\n for x_temp in\r\n self.find_within(indexfrom=None,\r\n indexto=0)]\r\n\r\n for u in undeletelist:\r\n print(PERIOD,end=EMPTYCHAR)\r\n\r\n self.move(u,\r\n Index(next(m_temp)),\r\n withchildren=True,\r\n update_table=update_table)\r\n print()", "def define_deletions(genome, num):\n start = []\n end = []\n for n in range(num):\n start_pos, end_pos = get_del_pos(genome)\n # add deletion Variants to genome list\n var = Variant(\"deletion\", start_pos, end_pos, start_pos-end_pos)\n genome.add_variant(var)\n # add to unavail list\n for j in range(start_pos, end_pos):\n genome.unavail_pos.append(j)", "def syncrepl_delete(self, uuids):\n pass", "def delete_many(self, keys):\n raise NotImplementedError()", "def note_delete(self, del_note_handle_list):\n rebuild = False\n for handle in del_note_handle_list :\n while self.data.count(handle) > 0:\n self.data.remove(handle)\n rebuild = True\n if rebuild:\n self.rebuild()", "async def delete(self, ctx, ids: commands.Greedy[int]):\n\n result = await self.bot.mongo.db.action.delete_many(\n {\"_id\": {\"$in\": ids}, \"guild_id\": ctx.guild.id}\n )\n word = \"entry\" if result.deleted_count == 1 else \"entries\"\n await ctx.send(f\"Successfully deleted {result.deleted_count} {word}.\")", "def _delete_bulk(self, iterable):\n self.cursor.executemany(self.DELETE, iterable)\n self.conn.commit()", "async def delete(self, ctx, ids: commands.Greedy[int]):\n\n result = await self.bot.mongo.db.reminder.delete_many(\n {\n \"_id\": {\"$in\": ids},\n \"guild_id\": ctx.guild.id,\n \"resolved\": False,\n \"user_id\": ctx.author.id,\n }\n )\n word = \"entry\" if result.deleted_count == 1 else \"entries\"\n await ctx.send(f\"Successfully deleted {result.deleted_count} {word}.\")\n self.clear_current()\n self.bot.loop.create_task(self.update_current())", "def _extract_deletes(self, query) :\n\t\tsparql = self.n.sparql\n\t\t\n\t\t# because the loop below alter's the contents of each insert\n\t\tquery = copy.copy(query)\n\t\t\n\t\t# grab the insert list\n\t\tdeletes = query[sparql.delete]\n\t\t\n\t\tnew_deletes = []\n\t\tfor delete in deletes :\n\t\t\tif sparql.delete in delete :\n\t\t\t\tvar = delete[sparql.subject]\n\t\t\t\tpredicate = delete[sparql.predicate]\n\t\t\t\t\n\t\t\t\tdel delete[sparql.subject]\n\t\t\t\tdel delete[sparql.predicate]\n\t\t\t\t\n\t\t\t\tif predicate is None :\n\t\t\t\t\tnew_deletes.append(delete)\n\t\t\t\telse :\n\t\t\t\t\tnew_deletes.append({\n\t\t\t\t\t\tsparql.var : var,\n\t\t\t\t\t\tpredicate : delete,\n\t\t\t\t\t})\n\t\treturn new_deletes", "def deletes(f):\n f.deletes = True\n return f", "def multi_delete(isamAppliance, ids=[], comment=None, check_mode=False, 
force=False):\n if comment != None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj['data'] == {}:\n return isamAppliance.create_return_object(changed=False)\n else:\n if ids == []:\n ids = ret_obj['data']\n else:\n for snaps in ret_obj['data']:\n ids.append(snaps)\n\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\"Deleting one or multiple snapshots\", \"/snapshots/multi_destroy?record_ids=\" + \",\".join(ids))\n\n return isamAppliance.create_return_object()", "def replaceWithAndMaybeDelete(self, *args):\n return _libsbml.Replacing_replaceWithAndMaybeDelete(self, *args)", "def list_multi_del(lst:list, indices):\n indices = list(set(indices))\n for idx in sorted(indices, reverse=True):\n del lst[idx]", "def delete_many(self, keys):\n return self.delete_many_values(keys)", "def performDeletions(self):\n return _libsbml.Submodel_performDeletions(self)", "def do_command(self, args):\n compops = dbops.Completions()\n compops.delete(args)", "def bulk_delete(self, **kwargs: Any) -> Response:\n tags = kwargs[\"rison\"]\n try:\n DeleteTagsCommand(tags).run()\n return self.response(200, message=f\"Deleted {len(tags)} tags\")\n except TagNotFoundError:\n return self.response_404()\n except TagInvalidError as ex:\n return self.response(422, message=f\"Invalid tag parameters: {tags}. {ex}\")\n except TagDeleteFailedError as ex:\n return self.response_422(message=str(ex))", "def delete(self, ids: List[str], *args, **kwargs):\n super(BinaryPbDBMSIndexer, self).delete(ids)", "def test_delete_many(self):\n\n # That one fails in r5 (because of improper handling of batches)\n\n doc_count = 10\n ids = [get_rand_string() for x in range(doc_count)]\n\n # Same data and user_id for all documents\n data = user_id = get_rand_string()\n\n for id in ids:\n self.conn.add(id=id, data=data, user_id=user_id)\n self.conn.commit()\n\n # Make sure they've been added\n for id in ids:\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"Document (id:%s) should've been added to index\" % id)\n\n # Delete documents by their ID and commit changes\n self.conn.delete(ids)\n self.conn.commit()\n\n # Make sure they've been deleted\n for id in ids:\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n \"Document (id:%s) should've been deleted from index\" % id)", "def _MultiReplace(data, repl):\n res = []\n prev = 0\n for (lo, hi, s) in sorted(repl):\n if prev < lo:\n res.append(data[prev:lo])\n res.append(s)\n prev = hi\n res.append(data[prev:])\n return ''.join(res)", "def manage_deleteObjects(self, ids=(), REQUEST=None):\n for id in ids: self._delObject(id)\n if REQUEST:\n return self.callZenScreen(REQUEST)", "def validate_deletions(taglist):\n preexisting_keys = list_of_keys_of(taglist.current_list)\n keys_of_tags_to_delete = unicode_decode_keys(taglist.deletions)\n\n non_existent_key_set = list(set(keys_of_tags_to_delete) - set(preexisting_keys))\n\n if non_existent_key_set:\n raise_validation_error(\n problematic_key_set=non_existent_key_set,\n problem_message=strings['tags.tag_keys_dont_exist_for_deletion'],\n exception_class=InvalidAttemptToModifyTagsError\n )", "def test_del_handles_multiple_place_changes(robust):\n robust.delete(9)\n assert robust.balance() == 1\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19\n )\n robust.delete(10)\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 11, 
12, 13, 14, 15, 16, 17, 18, 19\n )\n assert robust.balance() == 1\n assert robust.depth() == 5\n robust.delete(19)\n robust.delete(11)\n robust.delete(12)\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 18\n )\n assert tuple(robust.breadth_first()) == (\n 8, 4, 16, 2, 6, 14, 18, 1, 3, 5, 7, 13, 15, 17\n )\n assert robust.balance() == 0\n assert robust.depth() == 4", "def delete_lists():\n del SAVE_EXISTENT[:]\n del SAVE_PRICE[:]\n del CARDS[:]", "def bulkDelete(self, objList: List[PermissionContext], tokenData: TokenData):", "def setDeletion(self, *args):\n return _libsbml.ReplacedElement_setDeletion(self, *args)", "def replace_tokens(self, tokens):\n details = []\n idx = 0\n if len(tokens) >= min_token_num:\n for i in range(len(tokens)):\n old_token = tokens[i]\n if old_token in self.vocab and self.get_random_prob() < self.token_prob:\n tokens[i] = self.get_delete_token()\n details.append((old_token, tokens[i], idx, idx + len(tokens[i])))\n idx += len(tokens[i])\n return tokens, details", "def delete_order():", "def _delete_cached_contents(patch_list):\n # TODO(guido): No need to do this in a transaction.\n patches = []\n content_keys = []\n for patch in patch_list:\n try:\n content_key = patch.content_key\n except db.Error:\n content_key = None\n try:\n patched_content_key = patch.patched_content_key\n except db.Error:\n patched_content_key = None\n if content_key is not None:\n content_keys.append(content_key)\n if patched_content_key is not None:\n content_keys.append(patched_content_key)\n patch.content_key = None\n patch.patched_content_key = None\n patches.append(patch)\n if content_keys:\n logging.info(\"Deleting %d contents\", len(content_keys))\n ndb.delete_multi(content_keys)\n if patches:\n logging.info(\"Updating %d patches\", len(patches))\n ndb.put_multi(patches)", "def deindex(self, values=None):\n if values is None:\n values = self.proxy_get()\n for value in values:\n self.deindex_value(value)", "def multi_del(self, keys, no_update_log=False):\n # TODO: write better documentation: why would user need the no_update_log param?\n opts = (no_update_log and TyrantProtocol.RDBMONOULOG or 0)\n if not isinstance(keys, (list, tuple)):\n keys = list(keys)\n\n wait(self.proto.misc(\"outlist\", keys, opts))", "def remove_many_descriptors(self, uuids):\n # Chunk up operation based on max clauses available to us\n\n def batch_op(_batch):\n \"\"\"\n :param _batch: UIDs to remove from index.\n :type _batch: collections.Iterable[collections.Hashable]\n \"\"\"\n uuid_query = ' OR '.join([self.d_uid_field + (':%s' % str(_uid))\n for _uid in _batch])\n self.solr.delete(\"%s:%s AND (%s)\"\n % (self.index_uuid_field, self.index_uuid,\n uuid_query))\n\n batch = collections.deque()\n for uid in uuids:\n batch.append(uid)\n\n # Will end up using max_clauses-1 OR statements, and one AND\n if len(batch) == self.max_boolean_clauses:\n batch_op(batch)\n batch.clear()\n\n # tail batch\n if batch:\n batch_op(batch)", "def delete_all(self):\n raise NotImplementedError()", "def delete():", "def _update_Deltas(self, a, deltas, Deltas):\n \n updated_Deltas = []\n a = a[-2::-1] \n for Delta, delta, ai in zip(reversed(Deltas), deltas, a):\n updated_Deltas.insert(0, Delta + np.outer(delta, ai))\n \n return updated_Deltas", "def delete(delete_targets: list[str], yes: bool) -> None: # type: ignore (not accessed)\n\n def delete_target(target: str) -> None:\n tag = Tag.from_str(target)\n\n if tag.version is None:\n to_delete_bentos = bento_store.list(target)\n else:\n 
to_delete_bentos = [bento_store.get(tag)]\n\n for bento in to_delete_bentos:\n if yes:\n delete_confirmed = True\n else:\n delete_confirmed = click.confirm(f\"delete bento {bento.tag}?\")\n\n if delete_confirmed:\n bento_store.delete(bento.tag)\n logger.info(\"%s deleted.\", bento)\n\n for target in delete_targets:\n delete_target(target)", "def _del_files(self, index_key, _del_list, fundamentals):\n _index=fundamentals.get(index_key, {})\n for _file in _del_list:\n _file_name=self._item_from_index(_file, 'filename', _index)\n if _file_name:\n try:\n self.rmfile(_file_name)\n except Exception, e:\n self.log('Failed to delete file %s: %s'%(_file_name, str(e)))", "def DeleteFood(r, foods):\n hasError = False\n for i in foods:\n try:\n f = r.food_set.get(pk=i[\"id\"])\n f.src.delete()\n f.delete()\n except:\n print(\"delete error\")\n hasError = True\n return hasError", "def batch_process(self, delete_list=[], update_list=[]):\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n payload = {\n 'updated': update_list,\n 'deleted': delete_list,\n }\n\n r = self.requests.post(\n self.request_url,\n data=json.dumps(payload),\n headers=self.default_headers,\n timeout=30\n )\n\n return r.status_code, r.json()", "def _generate_delete_sql(self, delete_keys):\n for key in delete_keys:\n app_label, sql_name = key\n old_node = self.from_sql_graph.nodes[key]\n operation = DeleteSQL(sql_name, old_node.reverse_sql, reverse_sql=old_node.sql)\n sql_deps = [n.key for n in self.from_sql_graph.node_map[key].children]\n sql_deps.append(key)\n self.add_sql_operation(app_label, sql_name, operation, sql_deps)", "def manage_delObjects(self, ids=[], REQUEST=None):\n if isinstance(ids, str):\n ids = [ids]\n if not ids:\n raise BadRequest('No items specified')\n try:\n p = self._reserved_names\n except Exception:\n p = ()\n for n in ids:\n if n in p:\n raise BadRequest('Not Deletable')\n while ids:\n id = ids[-1]\n v = self._getOb(id, self)\n\n try:\n if v.wl_isLocked():\n raise ResourceLockedError(\n 'Object \"%s\" is locked.' 
% v.getId())\n except AttributeError:\n pass\n\n if v is self:\n raise BadRequest('%s does not exist' %\n html.escape(ids[-1], True))\n self._delObject(id)\n del ids[-1]\n if REQUEST is not None:\n return self.manage_main(self, REQUEST)", "def _two_edits_deleted_variations(token):\n return (\n two_edits_distance_of_word for one_edit_distance_of_word in\n SymmetricDeleteCorrector._one_edit_deleted_variations(token) for two_edits_distance_of_word in\n SymmetricDeleteCorrector._one_edit_deleted_variations(one_edit_distance_of_word))", "def delete(self, ids):\n\n if self.cluster:\n return self.cluster.delete(ids)\n\n return super().delete(ids)", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete_many(self, keys):\n try:\n if keys:\n self._cache.delete(*map(self.prepare_key, keys))\n except Exception as err:\n return self.warn_or_error(err)", "def delete_from_index(self,delete_list):\n self.__mode = self.WRITE_MODE\n if not self.__storage:\n self.__load_index()\n \n try:\n for to_delete in delete_list:\n if self.__storage.has_key(to_delete):\n del self.__storage[to_delete]\n except Exception,e:\n print e\n self.__storage = None\n return False\n \n self.__close_storage()\n return True", "def delete_keys_tags(self,\r\n index,\r\n deletedkeys):\r\n\r\n\r\n for k_temp in deletedkeys:\r\n k_temp = k_temp.strip()\r\n if k_temp in set(self.get_keys()):\r\n self.discard_index_from_key(k_temp, index)\r\n if self.get_indexes_for_key(k_temp) == set():\r\n self.eliminate_key(k_temp)\r\n for t_temp in self.get_tags():\r\n if k_temp in self.get_keys_for_tag(t_temp):\r\n self.discard_key_from_tag(t_temp,k_temp)\r\n if not self.get_keys_for_tag(t_temp):\r\n self.delete_tag(t_temp)", "def post_exec(self):\n \n for task in self.tasks.values():\n for elem in task.objects.values():\n if elem.isdelete:\n self.uow._remove_deleted(elem.obj)\n else:\n self.uow.register_clean(elem.obj)", "def get_delete_statement(self, val, prev, ctx):\r\n if val is prev is None:\r\n return []\r\n\r\n val = self.to_database(val)\r\n prev = self.to_database(prev)\r\n if isinstance(val, self.Quoter): val = val.value\r\n if isinstance(prev, self.Quoter): prev = prev.value\r\n\r\n old_keys = set(prev.keys()) if prev else set()\r\n new_keys = set(val.keys()) if val else set()\r\n del_keys = old_keys - new_keys\r\n\r\n del_statements = []\r\n for key in del_keys:\r\n field_id = uuid1().hex\r\n ctx[field_id] = key\r\n del_statements += ['\"{}\"[:{}]'.format(self.db_field_name, field_id)]\r\n\r\n return del_statements", "def delete_multi(self, keys, dead_time=0, key_prefix=''):\n\n\t\tserver_keys, _deprefix = yield self._map_keys_to_servers(keys, key_prefix)\n\n\t\t# send out all requests on each server before reading anything\n\t\tdead_servers = []\n\n\t\tif dead_time is None:\n\t\t\tdead_time = 0\n\n\t\tret = True\n\n\t\tfor server in server_keys.iterkeys():\n\t\t\tcommands = []\n\t\t\tfor prefixed_key, 
_original_key in server_keys[server]:\n\t\t\t\tcommands.append(\"delete %s %d\\r\\n\" % (prefixed_key, dead_time))\n\n\t\t\ttry:\n\t\t\t\tserver.send_cmds(''.join(commands))\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\t\t\t\tdead_servers.append(server)\n\n\t\t# if any servers died on the way, don't expect them to respond.\n\t\tfor server in dead_servers:\n\t\t\tdel server_keys[server]\n\n\t\tfor server, keys in server_keys.iteritems():\n\t\t\ttry:\n\t\t\t\tfor _key in keys:\n\t\t\t\t\tres = yield server.read_line()\n\t\t\t\t\tif res != \"DELETED\":\n\t\t\t\t\t\tself._debuglog(\"expected 'DELETED', got %r\" % (res, ))\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\t\t\t\tret = False\n\n\t\traise StopIteration(ret)", "def delDoublon(values):\n\treturn list(set(values))", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "async def delete_many(self, **query):\n\n try:\n for result in await self.db.get_many(**query):\n await result.delete()\n except IntegrityError:\n raise ConflictException(\n f\"At least one {self.db_model_name} cannot be deleted since it is actively used\"\n )", "def get_deletes_list(self, w):\n\n deletes = []\n queue = [w]\n for d in range(self.max_edit_distance):\n temp_queue = []\n for word in queue:\n if len(word) > 1:\n for c in range(len(word)): # character index\n word_minus_c = word[:c] + word[c + 1:]\n if word_minus_c not in deletes:\n deletes.append(word_minus_c)\n if word_minus_c not in temp_queue:\n temp_queue.append(word_minus_c)\n queue = temp_queue\n\n return deletes", "def get_deletes_list(self, w):\n\n deletes = []\n queue = [w]\n for d in range(self.max_edit_distance):\n temp_queue = []\n for word in queue:\n if len(word) > 1:\n for c in range(len(word)): # character index\n word_minus_c = word[:c] + word[c + 1:]\n if word_minus_c not in deletes:\n deletes.append(word_minus_c)\n if word_minus_c not in temp_queue:\n temp_queue.append(word_minus_c)\n queue = temp_queue\n\n return deletes", "def edit_chunks(chunks, delete, replace, add):\n for type, v in chunks:\n if type in delete:\n continue\n if type in replace:\n yield type, replace[type].content.read()\n del replace[type]\n continue\n\n if b\"IDAT\" <= type <= b\"IDAT\" and replace:\n # If there are any chunks on the replace list by\n # the time we reach IDAT, add then all now.\n # put them all 
on the add list.\n for chunk in replace.values():\n yield chunk.type, chunk.content.read()\n replace = dict()\n\n if b\"IDAT\" <= type <= b\"IDAT\" and add:\n # We reached IDAT; add all remaining chunks now.\n for chunk in add:\n yield chunk.type, chunk.content.read()\n add = []\n\n yield type, v", "def test_bulk_delete_iterates_doc_ids_only_once(self):\n doc = self._index_new_doc()\n doc_ids = OneshotIterable([doc[\"_id\"]])\n self.adapter.bulk_delete(doc_ids) # does not raise IterableExhaustedError", "def sequence_replace(sequences, char_to_replace, char_replacements):\n return [sequence_replace_single(sequence, char_to_replace, char_replacements) for sequence in sequences]", "def delete_many(self, *keys):\n self.collection.remove({'_id': {'$in': keys}})\n return True", "def batch_process_async(self, delete_list=[], update_list=[]):\n headers = update_dict(self.default_headers, {self.API_VERSION_HEADER: self.API_VERSIONS[\"v2\"]})\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n\n payload = {\n 'updated': update_list,\n 'deleted': delete_list,\n }\n\n r = self.requests.post(self.request_url, data=json.dumps(payload), headers=headers, timeout=30)\n\n return r.status_code, r.json()", "def manage_deleteRRDDataPoints(self, ids=(), REQUEST=None):\n\n def clean(rel, id):\n for obj in rel():\n if id in obj.dsnames:\n obj.dsnames.remove(id)\n if not obj.dsnames:\n rel._delObject(obj.id)\n\n if not ids: return self.callZenScreen(REQUEST)\n for id in ids:\n dp = getattr(self.datapoints,id,False)\n if dp:\n clean(self.graphs, dp.name())\n clean(self.thresholds, dp.name())\n self.datapoints._delObject(dp.id)\n \n if REQUEST: \n return self.callZenScreen(REQUEST)", "def getDels(my_cigar, my_md):\n # we only need the first position of the getDiffLocs range\n # x[1][1:] cleans '^' from dels\n del_loc = (x[0] for x in getDiffLocs(my_cigar, 'D'))\n del_type = (x[1][1:] for x in splitTag(my_md) if x[1][0] == '^')\n return ((x, 'D', y) if len(y) == 1 else (x, 'P', y)\n for x,y in zip(del_loc, del_type))", "def test_ins_del_delta(civic, diff, main_data, updated_data, delta):\n civic._ins_del_delta(delta, 'genes', 'DELETE', [3], main_data['genes'])\n assert delta['genes']['DELETE'] == diff['genes']['DELETE']\n\n civic._ins_del_delta(delta, 'assertions', 'INSERT', [1],\n updated_data['assertions'])\n assert delta['assertions']['INSERT'] == diff['assertions']['INSERT']", "def replace_multiple_tax_with_invalid_ids(cazy_data, gbk_accessions, replaced_taxa_logger, args):\n # retrieve the first half of the list\n mid_point = int((len(gbk_accessions)/2))\n\n half_gbk_accs = gbk_accessions[:mid_point]\n\n cazy_data, success = replace_multiple_tax(\n cazy_data,\n half_gbk_accs,\n replaced_taxa_logger,\n args,\n invalid_ids=True,\n )\n\n if success:\n # invalid IDs are stored in the second half of the accessions list\n half_gbk_accs = gbk_accessions[mid_point:]\n\n for accession in tqdm(half_gbk_accs, desc='Retrieving taxonomies individually'):\n cazy_data, success = replace_multiple_tax(\n cazy_data,\n [accession],\n replaced_taxa_logger,\n args,\n invalid_ids=True,\n )\n\n if success is False:\n cazy_data = select_first_organism(cazy_data, [accession], replaced_taxa_logger)\n\n else:\n # invalid gbk ID present in the first half of the accessions list\n for accession in tqdm(half_gbk_accs, desc='Retrieving taxonomies individually'):\n cazy_data, success = replace_multiple_tax(\n cazy_data,\n [accession],\n replaced_taxa_logger,\n args,\n invalid_ids=True,\n )\n\n if success is False:\n 
cazy_data = select_first_organism(cazy_data, [accession], replaced_taxa_logger)\n\n # parse the second half of the accessions list\n half_gbk_accs = gbk_accessions[mid_point:]\n\n cazy_data, success = replace_multiple_tax(cazy_data, half_gbk_accs, True)\n\n if success is False:\n # invalid gbk ID present in the second half of the accessions list\n for accession in tqdm(half_gbk_accs, desc='Retrieving taxonomies'):\n cazy_data, success = replace_multiple_tax(\n cazy_data,\n [accession],\n replaced_taxa_logger,\n args,\n invalid_ids=True,\n )\n\n if success is False:\n cazy_data = select_first_organism(\n cazy_data,\n [accession],\n replaced_taxa_logger,\n )\n\n return cazy_data, True", "def test_remove_all_values4(delete_tree):\n delete_tree.remove(\"teabagged\")\n delete_tree.remove(\"teabaggers\")\n delete_tree.remove(\"teabagger\")\n delete_tree.remove(\"teabags\")\n delete_tree.remove(\"teabag\")\n delete_tree.remove(\"tea\")\n delete_tree.remove(\"ted\")", "def delete_files_or_dirs(delete_list):\n try:\n from os import unlink\n from shutil import rmtree\n except ImportError, ie:\n log.err(ie)\n\n for temp in delete_list:\n try:\n unlink(temp)\n except OSError:\n rmtree(temp, ignore_errors=True)", "def clean_ids(cls, diffsync: DiffSync, ids):\n return cls.clean_ids_or_attrs(diffsync, ids)", "def selenium_teardown():\n families_to_delete, visits_to_delete, responses_to_delete = [], [], []\n\n families_to_delete.extend(Family.objects.filter(study_id_number=59638))\n families_to_delete.extend(Family.objects.filter(study_id_number=83695))\n for f in families_to_delete:\n visits_to_delete.extend(f.visit_set.all())\n for v in visits_to_delete:\n responses_to_delete.extend(v.response_set.all())\n\n for r in responses_to_delete:\n r.delete()\n for v in visits_to_delete:\n v.delete()\n for f in families_to_delete:\n f.delete()", "def delete(self, *names):\n\n return [shard.delete(*keys) for shard, keys\n in self.gather_keys_by_shard(names)]", "def user_unions_erase(*args):\n return _ida_hexrays.user_unions_erase(*args)", "def del_all(self, items):\n for item in items:\n item.key.delete()\n logger.debug(\"Deleted all the items\")", "def _one_edit_deleted_variations(token):\n splitted_token_pairs = [(token[:i], token[i:]) for i in range(len(token) + 1)]\n return (left_split + right_split[1:] for left_split, right_split in splitted_token_pairs if right_split)", "def delete_tenant_bulk(self, tenant_list, sync=False):", "def resolve_minus_d(\n time=None,\n config=None,\n arg_list=None,\n cbs_merge_parameters=None,\n directory_changer=None,\n operation_space_factory=None,\n instant_feedback_logger=None\n):\n iflogger = instant_feedback_logger\n namespace = PARSER.parse_args(args=arg_list)\n\n iflogger.info('Resolve deleted revisions')\n iflogger.info('Start time: ' + time.strftime('%Y-%m-%d-%H-%M-%S'))\n\n directory = os.path.join(\n config['homedir'], 'integ', cbs_merge_parameters['branch'], 'integ')\n directory_changer.change(directory=directory)\n\n operation_space = operation_space_factory.make()\n\n # set integInfo and integBranch\n iflogger.info('Set integInfo and integBranch')\n delconfig = operation_space['prepare_delenv'].execute(\n branch=cbs_merge_parameters['branch'],\n fromBranch=cbs_merge_parameters['fromBranch'],\n )\n\n # call p4 integ for delete revisions\n iflogger.info('Call p4 integ for delete revisions')\n deleted_file_list = operation_space['call_p4_integ'].execute(\n delconfig=delconfig,\n fromCLN=cbs_merge_parameters['fromCLN'],\n )\n\n if not deleted_file_list:\n 
iflogger.warn('Aborting: No deletion files detected')\n return\n\n # create a new changelist for the files with deletion\n iflogger.info('Creating changelist for deletion')\n deletion_cln = operation_space['create_changelist'].execute(\n change_type='deletion',\n fromCLN=cbs_merge_parameters['fromCLN'],\n branch=cbs_merge_parameters['branch'],\n fromBranch=cbs_merge_parameters['fromBranch'],\n count=str(len(deleted_file_list)),\n config=config,\n )\n\n # add conflicted files to conflict changelist\n iflogger.info('Adding deletion files to deletion changelist')\n iflogger.info('Changelist: %s' % deletion_cln)\n for file_identifier in deleted_file_list:\n iflogger.info('Adding %s' % file_identifier)\n operation_space['reopen'].execute(\n changelist_number=deletion_cln,\n file_identifier=file_identifier,\n config=config,\n )\n\n # Provide file history of deleted files on both branch and fromBranch\n iflogger.info('Displaying deletion files history')\n directory = os.path.join(\n config['homedir'], 'integ', cbs_merge_parameters['branch'], 'integ')\n directory_changer.change(directory=directory)\n for file_identifier in deleted_file_list:\n iflogger.info('<<<checking file: %s>>>' % file_identifier)\n operation_space['list_history'].execute(\n file_identifier=file_identifier,\n branch=cbs_merge_parameters['branch'],\n config=config,\n )", "def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False):", "def expunge(self):\n delete = []\n for i in self.messages:\n if '\\\\Deleted' in i[1]:\n delete.append(i)\n for i in delete:\n self.messages.remove(i)\n return [i[3] for i in delete]", "def del_seqs(self, keys):\n for j in range(len(keys)):\n del self._d_seqs[keys[j]]\n self._num_seqs = int(len(self._d_seqs))\n self._d_seqs = self._d_seqs\n self._seqs = list(self._d_seqs)", "async def delete(self, *keys, **kwargs):\n\n def gen_keys(keys):\n all_keys = []\n for key in keys:\n if isinstance(key, list):\n all_keys += gen_keys(keys=key)\n else:\n all_keys.append(key)\n return all_keys\n\n all_keys = gen_keys(keys)\n for key in all_keys:\n await self._client_conn.hdel(key=self.name, field=key)", "def remove_many_descriptors(self, uuids):", "def deleteManyFileRecords(deleted_files, default_dir):\n del_files = []\n session = Queries.createSession()\n try:\n for name, path, file_hash, size in deleted_files:\n file_servers = []\n file_path = path.replace(default_dir, '')\n user_file = session.query(FileTable).filter_by(original_name=name, user_path=file_path, file_hash=file_hash).first()\n if user_file is not None:\n servers = user_file.server_id[:]\n for server in servers:\n user_file.server_id.remove(server)\n file_servers.append((server.ip, server.port))\n del_files.append(('DELET_FILE', name, path, user_file.server_name, file_servers))\n session.delete(user_file)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return del_files", "def delete(self, first, last, insert=\"\"):\n assert all(new in self.ALPHABET for new in insert)\n if first < 1 or last > len(self.sequence):\n raise ValueError(f\"Deletion {first}-{last} out of bounds for given sequence.\")\n self.sequence = f\"{self.sequence[: first - 1]}{insert}{self.sequence[last:]}\"\n if \"mutations\" in self.metadata.keys():\n self.metadata[\"mutations\"] += f\" del{first}-{last}{insert}\"\n else:\n self.metadata[\"mutations\"] = 
f\"del{first}-{last}{insert}\"", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def delete_demos():\n for index in range(len(feconf.DEMO_EXPLORATIONS)):\n delete_demo(str(index))", "def test_fuzz_deletions():\n key_range = 2 ** 64\n value_range = 1024\n key_set = set()\n \n d = OrderedTreeDict()\n for value in range(0, value_range):\n key = randint(0, key_range)\n d.put(key, value)\n key_set.add(key)\n \n sorted_keys = list(sorted(key_set))\n sorted_keys_slice = sorted_keys[0:len(sorted_keys) // 2]\n \n for key in sorted_keys_slice:\n d.delete(key)\n assert len(d) > 0\n assert key not in d\n assert d.depth() <= int(2 * math.log(len(d), 2)), \"Should stay as balanced as a red black tree. \"\n \n keys = list(d.keys())\n assert len(keys) == len(sorted_keys_slice), \"Length should reflect number of items inserted.\"\n assert len(keys) == len(list(keys)), \"Iteration should find all items in tree.\"", "def delete(self, keys: List[K]) -> List[bool]:\n raise NotImplementedError('delete must be reimplemented in concrete implementation')", "def Delete(ids):\n db = catocommon.new_conn()\n\n # we have to check each cloud and see if it's used as the default...\n # if so, you can't delete it without first fixing the account.\n # ACCOUNTS REQUIRE A DEFAULT CLOUD\n existed = False\n for delete_id in ids:\n sql = \"select count(*) from cloud_account where default_cloud_id = %s\" % (delete_id)\n exists = db.select_col_noexcep(sql)\n\n if not exists:\n sql = \"delete from clouds_keypair where cloud_id = %s\" % (delete_id)\n db.tran_exec(sql)\n \n sql = \"delete from clouds where cloud_id = %s\" % (delete_id)\n db.tran_exec(sql)\n \n db.tran_commit()\n else:\n existed = True\n \n db.close()\n\n msg = \"\"\n if existed:\n msg = \"Some of the selected Clouds were not deleted because they are referenced by a Cloud Account. Delete the Account first, or assign it a new Default Cloud.\"\n \n return True, msg", "def delete_transactions(conn, ids):\n cur = conn.cursor()\n for item in ids:\n cur.execute(\"DELETE from transactions WHERE id=? \", (item,))\n conn.commit()" ]
[ "0.6490101", "0.5986913", "0.5909", "0.59087205", "0.58310133", "0.57015437", "0.56676894", "0.5641197", "0.55778617", "0.5471409", "0.54256725", "0.53889436", "0.53551024", "0.5288458", "0.52593184", "0.52453256", "0.52418804", "0.5230966", "0.51611537", "0.5143816", "0.5125344", "0.51252294", "0.5107974", "0.51036835", "0.5094425", "0.50930494", "0.50769854", "0.5060708", "0.5059513", "0.5045554", "0.5040782", "0.50131947", "0.49971107", "0.4992076", "0.4981251", "0.49767566", "0.49739927", "0.49622333", "0.4960184", "0.49459988", "0.49459258", "0.4943429", "0.49296448", "0.4923868", "0.4922505", "0.49167034", "0.49141833", "0.49141833", "0.49141833", "0.49141833", "0.4912248", "0.49054322", "0.49049717", "0.489591", "0.48930243", "0.4885917", "0.48836353", "0.48724788", "0.48724788", "0.48724788", "0.48724788", "0.48724788", "0.48724788", "0.48724788", "0.486637", "0.4865168", "0.4865168", "0.486309", "0.4858126", "0.4854564", "0.4848479", "0.48447457", "0.48445213", "0.48405352", "0.48263884", "0.48253414", "0.4814663", "0.47773576", "0.47707897", "0.47679225", "0.47643617", "0.47628516", "0.47407624", "0.47405759", "0.47405192", "0.4735886", "0.47345534", "0.4729944", "0.4724563", "0.4721864", "0.47205225", "0.47169405", "0.47162652", "0.47138906", "0.47138906", "0.47079092", "0.47025892", "0.4696488", "0.4691834", "0.4690064" ]
0.5214702
18
Convert attribute access to `getattr` call.
def visit_Attribute(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): args = [ node.value, ast.Str(node.attr) ] return to_call(to_name('getattr'), args) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getattr__(self, attr):\n return getattr(self.get_function(), attr)", "def __getattr__(self, attr):\n\n\tcommand = attr.replace('_', '-')\n\n\tif command in self.lambda_cache:\n\t return self.lambda_cache[command]\n\n\tif command in self.command_table: # is valid\n\t if command in ('read-data', 'read-icon'):\n\t\tx = lambda *args, **kwargs: self.call(command, *args, **kwargs)\n\t else:\n\t\tx = lambda *args, **kwargs: self.call_py(command, *args, **kwargs)\n\t self.lambda_cache[command] = x\n\t return self.lambda_cache[command]\n\n\traise AttributeError, 'unknown attribute %s (command %s)' % (attr, command)", "def rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))", "def __getattr__(self, attr):\n return self.get(attr)", "def __getattr__(self, attr):\n return getattr(self.rabjcallable, attr)", "def __getattr__(self, name: str) -> Any:\n return self.__getattribute__(name)", "def __getattr__(self, attr):\n fixed_attr = \"%s_%s\" % (self.resource_name, attr)\n actual_callable_op = getattr(self.actual_resource, fixed_attr)\n param_name = attr.replace(\"Get\", \"\").replace(\"Set\", \"\")[0].lower()\n return WrappedCallableOp(actual_callable_op, param_name)", "def __getattr__(self, x):\r\n return self[x]", "def deepgetattr(obj, attr):\n\t\treturn reduce(getattr, attr.split('.'), obj)", "def __getattr__(self, attr):\n return getattr(self.obj, attr)", "def __getattr__(self, attr):\n return getattr(self.obj, attr)", "def safe_chain_getattr(obj, attr):\n return reduce(getattr, attr.split('.'), obj)", "def __getattr__(self, name):\n if name in self:\n return self[name]\n raise AttributeError(_(\"Unknown attribute '%s'.\") % name)", "def __getattr__(self, attribute):\n if attribute.startswith('__'):\n raise AttributeError\n return getattr(self._http, attribute)", "def __getattr__(self, attribute):\n ret_val = getattr(self._value, attribute)\n return ret_val", "def __getattr__(self, attr):\n return getattr(self._parser, attr)", "def __getattr__(self, key):\n return self._func_for_key(key)", "def __getattr__(self, attr): # or does it ?\n return self.X[attr]", "def __getattr__(self, key):\n return self.get_attribute(key)", "def __getattr__(self, attr: str) -> str:\n\n if attr not in self.raw:\n raise AttributeError(\"unknown attribute '{}'\".format(attr))\n\n return self[attr]", "def __getattr__(self, name):\n attr_path = self._attr_path + (name,)\n if self._known_attrs is None or attr_path not in self._known_attrs:\n self._update_attrs()\n attr_info = self._known_attrs.get(attr_path)\n if attr_info is None:\n raise AttributeError('%s has no attribute \"%s\"' % (self, name))\n if attr_info['callable']:\n if attr_path not in self._callable_proxies:\n self._callable_proxies[attr_path] = _CallableProxy(\n self.actor_ref, attr_path)\n return self._callable_proxies[attr_path]\n elif attr_info['traversable']:\n if attr_path not in self._actor_proxies:\n self._actor_proxies[attr_path] = ActorProxy(\n self.actor_ref, attr_path)\n return self._actor_proxies[attr_path]\n else:\n message = {\n 'command': 'pykka_getattr',\n 'attr_path': attr_path,\n }\n return self.actor_ref.ask(message, block=False)", "def __getattr__(self, attributeName):\n\n return getattr(self.__decoratedObj, attributeName)", "def __getattr__(self, attr):\n if type(self).__name__ in __delegate_int__:\n if attr in __delegate_int__[type(self).__name__]:\n return __delegate_int__[type(self).__name__][attr]\n\n else:\n raise 
AttributeError(attr)\n\n else:\n raise AttributeError(attr)", "def __getattr__(self, name: str) -> Callable:\n return getattr(self.handle, name)", "def rgetattr(obj, attr, default=object()):\r\n if default is object():\r\n _getattr = getattr\r\n else:\r\n def _getattr(obj, name):\r\n \"\"\" Get an attribute from Krest object\"\"\"\r\n return getattr(obj, name, default)\r\n return functools.reduce(_getattr, [obj]+attr.split('.'))", "def __getattr__(self, attr):\n if attr in self._evtData_attrs:\n return getattr(self.evtData, attr)\n \n if attr in self._epicsLive_attrs:\n return getattr(self.epicsLive, attr)\n\n if attr in self._epicsStore_attrs:\n return getattr(self.epicsStore, attr)\n\n if attr in self.parameters:\n return self.parameters[attr]\n\n if attr in self._user_funcs:\n return self.get_function(attr)\n\n# if 'detectors' in self._det and attr in self._det['detectors']:\n if attr in self._detectors_attrs:\n return getattr(self._data, self._det['detectors'][attr])", "def __getattr__(self, attr):\n return getattr(self.compiled, attr)", "def __getattr__(name):\n return _MangledName.module_getattr(\n module_name=__name__, module_globals=globals(), name=name)", "def corner_case_getattr(target, attr):\n if isinstance(target, collections.abc.Sequence):\n return target[int(attr)]\n elif isinstance(target, collections.abc.Mapping):\n return target[attr]\n else:\n return getattr(target, attr)", "def __getattr__(self, name):\n return getattr(self.__decorated, name)", "def __getattr__(self, attr):\n\n # Prevent infinite recursion here.\n if attr.startswith('_'):\n return self.__getattribute__(attr) # Raise AttributeError.\n\n # TODO: with >3.5 support, can do:\n # pos_comps = {**self.pos_components,\n # **self._get_extra_mappings('pos')}\n pos_comps = self.pos_components.copy()\n pos_comps.update(self._get_extra_mappings('pos'))\n if attr in pos_comps:\n val = getattr(self.pos, pos_comps[attr])\n return val\n\n # TODO: with >3.5 support, can do:\n # pos_comps = {**self.vel_components,\n # **self._get_extra_mappings('vel')}\n vel_comps = self.vel_components.copy()\n vel_comps.update(self._get_extra_mappings('vel'))\n if attr in vel_comps:\n val = getattr(self.vel, vel_comps[attr])\n return val\n\n if attr in r.REPRESENTATION_CLASSES:\n return self.represent_as(attr)\n\n return self.__getattribute__(attr) # Raise AttributeError.", "def getattr(x, name):\n pass", "def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)", "def __getattr__(self, name):\n try:\n return self[self.sig.argpos(name)]\n except:\n pass\n return BasicCall.__getattr__(self, name)", "def __getattr__(self, name):\n return self.lookup(name)", "def get_attr(obj, attr):\n return getattr(obj, attr)", "def __getattr__(self, item):\n return getattr(self.__dict__['_obj'], item)", "def _getattr(obj, name):\r\n return getattr(obj, name, default)", "def __getattr__(self, name: str) -> any:\n return self._dict[name]", "def __getattr__(self, name):\n value = self.__dict__.get(name)\n if not value:\n raise AttributeError('No such attribute {0}'.format(name))\n return value", "def __getattr__(self, attr):\n\n # This way, we don't have to write: rv = Boto().boto.some_call\n # But can just write: rv = Boto().some_call\n # This also gives us hooks for future logging/timers/etc and\n # extended wrapping of things the attributes return if we so\n # choose.\n\n self._logger.debug('Calling wrapped boto attribute: %s on %s', attr, self)\n\n attr = getattr(self._boto, attr)\n\n if callable(attr):\n 
self._logger.debug(\"Boto attribute '%s' is callable\", attr)\n\n @wraps(attr)\n def wrapper(*args, **kwargs):\n return attr(*args, **kwargs)\n return wrapper\n\n return attr", "def __getattr__(self, attr):\n if attr.startswith(\"__\") and attr.endswith(\"__\"):\n return getattr(Select, attr)\n elif attr not in self.__dict__ and hasattr(self.__dict__[\"cut\"], attr):\n return getattr(self.__dict__[\"cut\"], attr)\n else:\n return self.__dict__[attr]", "def __getattr__(self, item):\n if item == \"data\":\n return self.f_get()\n elif item == \"default\":\n return self.f_get_default()\n else:\n raise AttributeError(\n \"`%s` object has no attribute `%s`.\" % (self.f_get_class_name(), item)\n )", "def x__getattr__(self, name):\n\n if hasattr(self.function_to_time, name):\n attr = getattr(self.function_to_time, name)\n return attr\n\n raise AttributeError(\n f\" {self} or its member {self}.function has no attribute '{name}'\")", "def dot(fn):\n def access(obj):\n return getattr(obj, fn)\n return access", "def __getattr__(self, item: str): # noqa: U100", "def chained_getattr(obj, path):\n target = obj\n for attr in path:\n target = corner_case_getattr(target, attr)\n return target", "def _safe_getattr(value, attr, default):\n try:\n return getattr(value, attr)\n except Exception:\n return default", "def __getattr__(self, attr):\n return getattr(self.door, attr)", "def getattr_ops(self):\n return self._getattr_ops", "def get( self, function ):\n return getattr( function, self.attribute, '' )", "def __getattr__(self, key):\n if key in self.cmdmanager:\n action = self.cmdmanager[key]\n def call(*args):\n try:\n action(self, *args)\n except:\n traceback.print_exc()\n return call\n try:\n return self.__getattr__(key)\n except:\n return self.__getattribute__(key)", "def __getattr__ (self, attr):\n try:\n return self.get_value (attr)\n except exc.x_not_found:\n try:\n return self.get_key (attr)\n except exc.x_not_found:\n raise AttributeError", "def getattr(cls, obj, attr_name: str, default_value=None, execute_callable: bool = True):\n if obj is None:\n return default_value\n\n not_found = 'getattr() Not Found'\n attr_name = str(attr_name)\n attrs = attr_name.split('.')\n\n # no name return none\n if not attrs:\n return default_value\n\n for att, last_item in last_iter(attrs):\n val = getattr(obj, att, not_found)\n\n # if not found using getattr then try to use some other alternative methods\n if val is not_found:\n # test positive int to get attr using index instead of name (ie. lists and tuples)\n if Str.is_positive_int(att) \\\n and (isinstance(obj, list) or isinstance(obj, tuple)):\n index = int(att)\n if index < len(obj):\n val = obj[index]\n elif Obj.has_func(obj, '__getitem__', '__contains__'):\n if att in obj:\n val = obj[att]\n elif Str.is_positive_int(att):\n index = int(att)\n if index in obj:\n val = obj[index]\n\n if val is not_found:\n return default_value\n\n if val is None and not last_item:\n return default_value\n # endif not found\n\n if callable(val) and not isinstance(val, Manager) and execute_callable:\n # watch out for bound vs unbound method\n # ref: https://docs.python.org/3/library/inspect.html#inspect.ismethod\n # is bound method (ie. func of an instance, ie. 
not static)\n # if inspect.ismethod(val): (bounded, not use for now)\n val = val.__call__()\n\n obj = val\n\n return obj", "def __getattr__(self, key):\n return self.__getitem__(key)", "def __getattr__(self, attr):\n # orig_attr = self._wrapped_env.__getattribute__(attr)\n if hasattr(self._wrapped_env, '_wrapped_env'):\n orig_attr = self._wrapped_env.__getattr__(attr)\n else:\n orig_attr = self._wrapped_env.__getattribute__(attr)\n\n if callable(orig_attr):\n def hooked(*args, **kwargs):\n result = orig_attr(*args, **kwargs)\n return result\n\n return hooked\n else:\n return orig_attr", "def get_attr(self, name: str):\n return self.call(name)", "def __getattr__(self, name):\n ...", "def __tr_getattr__(self, name):\n raise AttributeError(name)", "def __tr_getattr__(self, name):\n raise AttributeError(name)", "def __getattr__(self, key):\n return self.get(key, None)", "def __getattr__ (self, name):\n\t\ttry:\n\t\t\treturn self.__dict__[name]\n\t\texcept KeyError:\n\t\t\treturn self.__dict__[\"value\"][name]", "def __getattr__(self, attr):\r\n\t\tif (attr in ['firmware', 'vfull', 'ifull', 'lifetime']):\r\n\t\t\treturn self.issue_command (command_id=attr, ch=None, operator='?', n_lines_requested=1)[0][0]", "def getattr(self, obj: t.Any, attribute: str) -> t.Any:\n try:\n return getattr(obj, attribute)\n except AttributeError:\n pass\n try:\n return obj[attribute]\n except (TypeError, LookupError, AttributeError):\n return self.undefined(obj=obj, name=attribute)", "def _get_fget(attr, private_attr, type_):\n # type: (str, str, Type[_T]) -> Callable[[], Any]\n\n def _fget(self):\n # type: (...) -> Any\n \"\"\"Get attribute from self without revealing the private name.\"\"\"\n try:\n return getattr(self, private_attr)\n except AttributeError:\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n _get_type_name(type_), attr\n )\n )\n\n return _fget", "def __call__(self, context):\r\n return getattr(context.obj, self.getAttrName(context))", "def __getattr__(self, attribute):\n\t\tassert ltrace_func(TRACE_BASE)\n\n\t\ttry:\n\t\t\treturn dict.__getitem__(self, attribute)\n\n\t\texcept KeyError:\n\t\t\ttry:\n\t\t\t\treturn dict.__getattr__(self, attribute)\n\n\t\t\texcept AttributeError:\n\t\t\t\ttry:\n\t\t\t\t\treturn NamedObject.__getattr__(self, attribute)\n\n\t\t\t\texcept AttributeError:\n\t\t\t\t\traise AttributeError(\"'%s' %s%s\" % (stylize(ST_BAD, attribute),\n\t\t\t\t\t\t\t'' if attribute in self.__class__._licorn_protected_attrs\n\t\t\t\t\t\t\t\telse ('\\n\\t- it is currently missing from %s '\n\t\t\t\t\t\t\t\t\t'(currently=%s)' % ('%s.%s' % (\n\t\t\t\t\t\t\t\t\t\tstylize(ST_NAME, self.name),\n\t\t\t\t\t\t\t\t\t\tstylize(ST_ATTR,'_licorn_protected_attrs')),\n\t\t\t\t\t\t\t\t', '.join(stylize(ST_COMMENT, value)\n\t\t\t\t\t\t\t\t\tfor value in self.__class__._licorn_protected_attrs))),\n\t\t\t\t\t\t\t'\\n\\t- perhaps you tried to %s a %s?' 
% (\n\t\t\t\t\t\t\t\tstylize(ST_ATTR, 'getattr()'),\n\t\t\t\t\t\t\t\tstylize(ST_COMMENT, 'property()'))))", "def deepgetattr(obj, attr):\n for key in attr.split('.'):\n obj = getattr(obj, key)\n return obj", "def __getattr__(self, attr):\n # operator - run\n if self.processors[attr].__class__ == op:\n return self.processors[attr](self)\n\n # attribute - cache value\n if not attr in self.values:\n self.values[attr] = self.processors[attr](self)\n # return cached attribute value\n return self.values[attr]", "def __getattr__(self, name):\n if name in self:\n return name\n raise AttributeError(name)", "def __getattr__(self, attr):\n\t\treturn getattr(self.__instance, attr)", "def _getattr(self, obj, value):\n _attrs = value.split('.')\n for attr in _attrs:\n val = getattr(obj, attr.strip(), False)\n if val is False:\n break\n obj = val\n if val is False:\n return ''\n return self.format_data(val)", "def __getattr__(self, value):\n return getattr(self._policy, value)", "def __getattr__(self, key: Any) -> Any:\n try:\n return self[key]\n except KeyError:\n raise AttributeError(key)", "def __getattr__(self, item):\n\n try:\n return self.__getitem__(item)\n except KeyError:\n raise AttributeError(\"unable to access item '%s'\" % item)", "def __getattr__(self, name):\n\n if name not in self._extras:\n raise AttributeError(\"'%s' object has no attribute '%s'\" %\n (self.__class__.__name__, name))\n\n return self._extras[name]", "def __getattr__(self, name: str) -> Any:\n # We don't want to return anything for python copy / pickle methods.\n if name in _UNDEFINED_COPY_PICKLE_METHODS:\n raise AttributeError()\n self._try_setup()\n if name in self.__dict__:\n return self.__dict__[name]\n else:\n raise AttributeError(\n f'\"{self.__class__.__name__}\" object has no attribute \"{name}\"')", "def __getattr__(self, name):\n if name in self:\n return name\n raise AttributeError", "def __getattr__(self, attr):\n if hasattr(self._me, attr):\n def wrapper(*args, **kwargs):\n return getattr(self._me, attr)(*args, **kwargs)\n return wrapper", "def __getattr__(self, name: str):\n return getattr(self.handle, name)", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def __getattr__(self, attribute):\n return self.parameters.get(attribute, None)", "def deepgetattr(obj, attr, default=None, splitter='.', do_raise=False):\n try:\n return reduce(getattr, attr.split(splitter), obj)\n except AttributeError:\n if do_raise:\n raise\n return default", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\n return getattr(self.__instance, attr)", "def __getattr__(self, attr):\r\n return getattr(self.__instance, attr)", "def chain_getattr(obj, attr, value=None):\n try:\n return _resolve_value(safe_chain_getattr(obj, attr))\n except AttributeError:\n return value", "def __getattr__(self, name):\n if not name in self._attrs.iterkeys():\n raise AttributeError(name)\n return self._attrs[name]", "def __getattr__(self, key: str) -> Any:\n return getattr(self._package, key)", 
"def __getattr__(self, name):\n return getattr(self._module, name)", "def __getattr__(self, exported_function_name: str) -> ExportedFunction:\n pass", "def __getattr__(self, attr, *args, **kwargs):\r\n return getattr(core.FW_conf['connection'],attr)", "def __getattr__(self, key):\n return self.sub.__getattribute__(key)" ]
[ "0.7326412", "0.6922202", "0.6918359", "0.6700088", "0.66784173", "0.66713166", "0.66044515", "0.6591623", "0.65870625", "0.6569076", "0.6569076", "0.65116334", "0.6508529", "0.64372194", "0.6435451", "0.64023787", "0.6391228", "0.63764155", "0.6372691", "0.63678974", "0.63456607", "0.6327065", "0.63094026", "0.630226", "0.62973243", "0.6279547", "0.62114835", "0.62110084", "0.6194614", "0.6182958", "0.61819303", "0.61796296", "0.61552775", "0.6141721", "0.61243004", "0.61235905", "0.6117148", "0.6112398", "0.60863066", "0.60786164", "0.60751057", "0.60740715", "0.60688424", "0.6064711", "0.6053532", "0.604706", "0.6042099", "0.6032129", "0.6011924", "0.5994251", "0.5979827", "0.5966311", "0.5963813", "0.5962793", "0.59543985", "0.59496695", "0.59472233", "0.59440535", "0.5943176", "0.5943176", "0.5930388", "0.59299517", "0.5927504", "0.5922583", "0.5916052", "0.59087294", "0.59055215", "0.58967936", "0.5896543", "0.5891792", "0.5878359", "0.58643836", "0.5859238", "0.585668", "0.5850438", "0.5845904", "0.5845283", "0.58385324", "0.5829725", "0.58283335", "0.5814078", "0.5814078", "0.58135575", "0.58117944", "0.5808596", "0.5808596", "0.5808596", "0.5808596", "0.5808596", "0.5808596", "0.5808596", "0.5808596", "0.58053297", "0.5803817", "0.57932884", "0.5790601", "0.5781994", "0.5768818", "0.5767546", "0.57663774" ]
0.61914927
29
Convert assignment to attributes to `setattr` call.
def visit_Assign(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Attribute): args = [ target.value, ast.Str(target.attr), node.value ] return ast.Expr(to_call(to_name('setattr'), args)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(self, attr, value):", "def set_attributes(object, attributes):\n for name, attribute in attributes.items():\n setattr(object, name, attribute)", "def __setattr__ (self, attr, value):\n self.set_value (attr, value)", "def __setattr__(*args, **kwargs):\n \n pass", "def __setattr__(*args, **kwargs):\n \n pass", "def __setattr__(*args, **kwargs): # real signature unknown\n pass", "def __setattr__(*args, **kwargs): # real signature unknown\n pass", "def __setattr__(*args, **kwargs): # real signature unknown\n pass", "def __setattr__(*args, **kwargs): # real signature unknown\n pass", "def __setattr__(*args, **kwargs): # real signature unknown\n pass", "def __setattr__(*args, **kwargs): # real signature unknown\n pass", "def rsetattr(obj, attr, val):\n pre, _, post = attr.rpartition('.')\n return setattr(rgetattr(obj, pre) if pre else obj, post, val)", "def __setattr__(self, attr, value):\n self[attr] = value", "def setAttributes(self, args):\n for atr in self.defaultAttributes:\n if args.has_key(atr):\n # convert atr to proper type\n objAttr = getattr(self, atr)\n myType = type(args[atr])\n if type(objAttr) == types.IntType and myType <> types.IntType:\n args[atr] = int(args[atr])\n elif type(objAttr) == types.StringType and myType <> types.StringType:\n args[atr] = str(args[atr])\n elif type(objAttr) == types.ListType and myType <> types.ListType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.DictType and myType <> types.DictType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.FloatType and myType <> types.FloatType:\n args[atr] = float(args[atr])\n setattr(self, atr, args[atr])", "def AssignAttributes(self, attr):\r\n \r\n self.SetAttributes(attr)\r\n self._ownsAttr = True", "def __setattr__(self, attr, value):\n super().__setattr__(attr, value)", "def __setattr__(self, name, value):\n self.set(**{name: value})", "def assign_attrs(elem, attrs):\n for k, v in attrs:\n # assign attr k with v\n # override class\n elem.set(sanitize_name(k), v)", "def __setattr__(self, name, value):\n self[name] = value\n return self", "def __setattr__(self, ???):", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def 
__setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ..." ]
[ "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.64025", "0.6343958", "0.6256444", "0.62069863", "0.62069863", "0.62035817", "0.62035817", "0.62035817", "0.62035817", "0.62035817", "0.62035817", "0.6201288", "0.6164921", "0.6040148", "0.6034232", "0.596068", "0.5960434", "0.59591424", "0.594788", "0.5926075", "0.58790284", "0.58790284", "0.58790284", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741", "0.5869741" ]
0.708953
0
Convert `del` on attributes to `delattr` call.
def visit_Delete(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Attribute): args = [ target.value, ast.Str(target.attr) ] return ast.Expr(to_call(to_name('delattr'), args)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __delattr__(cls, name):\n raise TypeError('May not delete attributes on definition class')", "def __delattr__(self, item):\n raise AttributeError(f'Attribute {key} can not be deleted')", "def __delattr__(self, name):\n del self[name]", "def __delattr__(self, name):\n self.unset(name)", "def __delattr__(self, name):\n if name.endswith(\"_\"):\n self.drop_var(name[:-1])\n else:\n if name not in self.__dict__:\n msg = \"'{}' object has no attribute '{}'\"\n raise AttributeError(msg.format(self.__class__.__name__, name))\n del self.__dict__[name]", "def destroy (self, *attrs):\n for attr in ('rect', 'draw_fn') + attrs:\n try:\n delattr(self, attr)\n except AttributeError:\n pass", "def __delattr__(self, name):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__delattr__')(name)\n for x in self:\n x.__delattr__(name)\n return self", "def __delattr__(self, name: str) -> None:\n self.remove(name)", "def _maybe_del_attr(da, attr):\n if attr in da.attrs:\n del da.attrs[attr]\n\n return da", "def __delattr__(self, key: Any):\n try:\n del self[key]\n except KeyError:\n raise AttributeError(key)", "def remove_feature_accessors(obj, feats: FeaturesTuple):\n for feat in feats:\n try:\n delattr(obj, feat.get_name())\n\n except AttributeError:\n pass", "def new_deconstruct(self):\n name, path, args, kwargs = original_deconstruct(self)\n for attr in IGNORED_ATTRS:\n kwargs.pop(attr, None)\n return name, path, args, kwargs", "def del_functions(self, *args):\n if len(args) > 0:\n attrs = args\n else:\n self._user_function.clear()", "def __delattr__(self, item):\n if item[0] == \"_\":\n super(Result, self).__delattr__(item)\n else:\n self.f_remove(item)", "def __delattr__(self, item):\n if item.startswith(\"_\"):\n return super(Slicer, self).__delattr__(item)\n\n # Sync private attributes that help track\n self._objects.pop(item, None)\n self._aliases.pop(item, None)\n if item == \"o\":\n self._anon.clear()\n\n # Recompute max_dim\n self._recompute_max_dim()\n\n # Recompute alias lookup\n # NOTE: This doesn't use diff-style deletes, but we don't care (not a perf target).\n self._alias_lookup = AliasLookup(self._aliases)\n\n # TODO: Mutate and check interactively what it does\n super(Slicer, self).__delattr__(item)", "def delete(self, attribute):\n self.__delattr__(attribute)", "def deleteATTR(sel=None):\n if sel == None:\n sel = pm.ls(sl=1)\n for obj in sel:\n #remove customAttr with keyable\n attrs = pm.listAttr(obj,k=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)\n #remove customAttr with Nonkeyable\n attrs = pm.listAttr(obj,cb=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)", "def __delattr__(self, feature):\n setattr(self, feature, None)", "def delattrs(self, srvurl, attrs = \"\", callback = None, cbdata = None):\n cb = callback\n if not callback:\n cb = self.__errcb\n cbdata = [ SLPError.SLP_OK ]\n err = self.slph.delattrs(srvurl, slpstr(attrs), cb, cbdata)\n if err != SLPError.SLP_OK:\n raise SLPError(err)\n if not callback:\n if cbdata[0] != SLPError.SLP_OK:\n raise SLPError(cbdata[0])", "def del_attrib(self, key):\n self.aux_attrib.pop(key)\n 
self.aux_attrib_args.pop(key)", "def __delattr__(self, attribute: str) -> None:\n try:\n object.__delattr__(self, attribute)\n except AttributeError:\n try:\n object.__delattr__(self.contents, attribute)\n except AttributeError:\n raise AttributeError(f'{attribute} is not in {self.__name__}')", "def del_attr(self, elt, localName, ns=None):\n for (pyname, (qname, ns_)) in elt.xml_attributes.items():\n _, name = SplitQName(qname)\n if ns_ == ns and name == localName:\n delattr(elt, pyname)", "def delattr(space, w_object, w_name):\n w_name = checkattrname(space, w_name)\n space.delattr(w_object, w_name)\n return space.w_None", "def attribute_del(self, serial, domain, keys=()):\n\n if keys:\n q = (\"delete from attributes \"\n \"where serial = ? and domain = ? and key = ?\")\n self.executemany(q, ((serial, domain, key) for key in keys))\n else:\n q = \"delete from attributes where serial = ? and domain = ?\"\n self.execute(q, (serial, domain))", "def __delete__(self, obj):\n try:\n delattr(obj, self.cache_attr)\n except AttributeError:\n pass", "def __delattr__(self, name):\r\n # The __delattr__ of the parent class should always be called so that the attribute actually gets removed.\r\n val = getattr(self, name)\r\n if type(val) == Constraint:\r\n self._remove_constraint(val)\r\n val.name = 'None'\r\n elif type(val) == ConstraintDict():\r\n val.name = 'None'\r\n val._model = None\r\n for k, v in val.items():\r\n self._remove_constraint(v)\r\n elif type(val) in {Var, Param, VarDict, ParamDict}:\r\n val.name = 'None'\r\n\r\n super(Model, self).__delattr__(name)", "def __delattr__(self, attr):\n # Set to default value\n if attr in self.fields:\n setattr(self, attr, self.fields[attr].default)\n else:\n super(BaseModel, self).__delattr__(attr)", "def __delitem__(self, key):\n try:\n del self._axl_data[key]\n except KeyError:\n raise AXLAttributeError(f\"Unknown AXL attribute for API endpoint: {key}\")", "def __delitem__(name):", "def __delattr__(self, attr):\n s = cleanup_name(attr)\n try:\n self[self[\"__psvcolumnstracker__\"][attr]] = \"\"\n except KeyError:\n if attr in self.__delwhitelist__:\n super(Row, self).__delattr__(attr)\n else:\n keys = self[\"__psvcolumnstracker__\"].keys()\n if s in keys:\n raise AttributeError((\n \"{}{}\"\n .format(\n '\\'{}\\' has no attribute \\'{}\\''.format(\n type(self), attr),\n \". However, '{s}' is an existing condensed \".format(s=s) +\n \"column name. 
Only the condensed version is supported.\"\n .format(s=s)\n )))\n else:\n\n if attr in dir(self):\n raise AttributeError(\n msg.attribute_readonly.format(classname=self.__class__, attr=attr))\n else:\n raise AttributeError(msg.attribute_missing.format(\n type(self), attr))", "def detach(cls, factory, attrib_name):\n cls._to_attach.remove((factory, attrib_name))", "def __delattr__(self, name):\n try:\n super(JobSubmission, self).__delattr__(name)\n return\n\n except AttributeError:\n pass\n\n try:\n del self.params[str(name)] # TODO: resolve parameter cases\n\n except KeyError:\n raise AttributeError(\"'JobSubmission' object has no attribute or \"\n \"parameter: {atr}\".format(atr=name))", "def __delitem__(self, key):\n key_split = key.split('.')\n current = self\n for k in key_split[:-1]:\n current = getattr(current, k)\n current.__delattr__(key_split[-1])", "def __delitem__(self, key):\n self.deleteAttributes([key])", "def deleteAttr(*args, attribute: AnyStr=\"\", name: AnyStr=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def cleanup(self):\n for attribute in self._all_db_field_names:\n delattr(self, attribute)", "def attr_remove(self):\n def _del_if_in(obj, attr):\n if attr in obj:\n del obj[attr]\n if self._modifier_exists(REMOVE_KEY):\n to_remove = self[CONFIG_KEY][SAMPLE_MODS_KEY][REMOVE_KEY]\n _LOGGER.debug(\"Removing attributes: {}\".format(to_remove))\n for attr in to_remove:\n [_del_if_in(s, attr) for s in self.samples]", "def popattr(obj, attr, default=NOT_PROVIDED):\n val = getattr(obj, attr, default)\n try:\n delattr(obj, attr)\n except AttributeError:\n if default is NOT_PROVIDED:\n raise\n return val", "def del_dict_attrs(d, key):\n key_parts = key.split('.')\n if len(key_parts) > 1:\n d[key_parts[:1][0]] = del_dict_attrs(d[key_parts[:1][0]], '.'.join(key_parts[1:]))\n else:\n del d[key_parts[:1][0]]\n return d", "def clear_attrs(self):\n self._attributes.clear()", "def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)", "def remove_attr(self, event: Union[wx.CommandEvent, None],\n attr_id: Union[int, None]) -> None:\n self.attr_buttons.pop(attr_id).Destroy()\n self.attr_values.pop(attr_id).Destroy()\n self.attr_labels.pop(attr_id).Destroy()\n attr_label = self.attr_ids.pop(attr_id)\n if attr_label != '':\n self.element.attr.pop(attr_label)\n if event is not None:\n self._update_attr_list()", "def removeAttr(self, *args):\n return _libsbml.XMLToken_removeAttr(self, *args)", "def __delitem__(self, key):\n del self._dict[key]\n del self._type_converter[key]", "def removeAttr(atributes=('exp'), *items):\n for item in items:\n # check if item is pynode\n if not isinstance(item, pm.nodetypes.Transform):\n logger.debug('Create Pynode: %s, %s' % (item, type(item)))\n item = pm.PyNode(item)\n\n # deleteAttrs\n for attr in atributes:\n try:\n item.attr(attr).delete()\n logger.info('Remove attribute: %s.%s' % (item, attr))\n\n except:\n logger.info('Can not delete attr: %s' % attr)", "def unload(self) -> None:\n for attr in self._attrs:\n setattr(self, attr, None)", "def remove_attribute(self, name):\n\n pass", "def clean_up(self):\n while len(self.__refs_for_deletion): \n attr = self.__refs_for_deletion.pop()\n obj = getattr(self, attr)\n if hasattr(obj, 'clean_up'):\n obj.clean_up()\n delattr(self, attr)", "def delete_xattr(self, xattr):\n return delete_fattr(self._host, self._fqpath, xattr)", "def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair", 
"def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair", "def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair", "def unregister(self, alias):\n delattr(self, alias)", "def clean_attrs(cls, diffsync: DiffSync, attrs):\n return cls.clean_ids_or_attrs(diffsync, attrs)", "def test_del_attribute_is_assigned_properly(self):\r\n class DelModel(Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n key = columns.Integer(primary_key=True)\r\n data = columns.Integer(required=False)\r\n\r\n model = DelModel(key=4, data=5)\r\n del model.data\r\n with self.assertRaises(AttributeError):\r\n del model.key", "def __delitem__(self, feature):\n self[feature] = None", "def unhide_attributes(pynode, attr_name_list = None):\n if attr_name_list is None:\n attr_name_list = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n\n _do_attributes_key_lock_hide(pynode, attr_name_list, hide = False)", "def removeattribute(self, uid, field):\n\n raise NotImplementedError", "def test_descriptor_del_dict(self):\n obj = TestObject()\n self.assertIsNone(obj.__dict__.get('test_setting'))\n obj.test_setting = \"foo\"\n self.assertIsNotNone(obj.__dict__.get('test_setting'))\n del obj.test_setting\n self.assertNotIn('test_setting', obj.__dict__)", "def delmemo(self, meth, *args, **kwargs):\n # see utils.memozie_method\n if hasattr(self, '_cache'):\n meth = getattr(self, meth )if isinstance(meth, basestring) else meth\n del self._cache[meth.func.meth, args, tuple(sorted(kwargs.items()))]", "def clear_cache(obj):\n try:\n delattr(obj, _CACHE_ATTR)\n except AttributeError:\n pass", "def detachDeviceAttr(*args, all: bool=True, attribute: AnyStr=\"\", axis: AnyStr=\"\", device:\n AnyStr=\"\", selection: bool=True, q=True, query=True, **kwargs)->Union[None,\n Any]:\n pass", "def _del(self) -> None:\n self.variables.pop(prop_name, None)", "def remove_property(class_, name):\n mapper = class_.mapper\n table = class_.__table__\n columns = class_.mapper.c\n column = columns[name]\n del columns._data[name]\n del mapper.columns[name]\n columns._all_cols.remove(column)\n mapper._cols_by_table[table].remove(column)\n mapper.class_manager.uninstrument_attribute(name)\n del mapper._props[name]", "def unobserve(self, attr: str | tuple[str, ...]):\n if isinstance(attr, str):\n attr = (attr,)\n path = self._path + attr\n return self._get_top_parent().unobserve(path)", "def del_handler(key):\n def wrapper(func):\n func.del_key = key\n return func\n\n return wrapper", "def __delattr__(self, name):\n if name in SimSnap._persistent:\n obj = self.ancestor._get_persist(self._inclusion_hash, name)\n if obj:\n self.ancestor._set_persist(self._inclusion_hash, name, None)\n try:\n object.__delattr__(self, name)\n except AttributeError:\n pass\n return\n object.__delattr__(self, name)", "def test_remove_a_single_attribute(self):\n pass", "def disconnectAttr(*args, nextAvailable: bool=True, **kwargs)->AnyStr:\n pass", "def unregister(self, rtypes=None, accessors=None):\n\n if rtypes is not None:\n for rtype in rtypes:\n del self[rtype]\n\n if accessors is not None:\n for accessor in accessors:\n for rtype in accessor.__rtypes__:\n if rtype in self:\n del self[rtype]", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> 
None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def remove_descriptor(self, uuid):", "def remove(\n self,\n key=None,\n raise_exception=False,\n category=None,\n accessing_obj=None,\n default_access=True,\n ):\n\n if key is None:\n self.clear(\n category=category, accessing_obj=accessing_obj, default_access=default_access\n )\n return\n\n category = category.strip().lower() if category is not None else None\n\n for keystr in make_iter(key):\n keystr = keystr.lower()\n\n attr_objs = self._getcache(keystr, category)\n for attr_obj in attr_objs:\n if not (\n accessing_obj\n and not attr_obj.access(accessing_obj, self._attredit, default=default_access)\n ):\n try:\n attr_obj.delete()\n except AssertionError:\n print(\"Assertionerror for attr.delete()\")\n # this happens if the attr was already deleted\n pass\n finally:\n self._delcache(keystr, category)\n if not attr_objs and raise_exception:\n raise AttributeError", "def test_descriptor_del(self):\n\n obj = TestObject()\n self.assertIsNone(obj.test_setting)\n obj.test_setting = \"foo\"\n self.assertIsNotNone(obj.test_setting)\n del obj.test_setting\n self.assertIsNone(obj.test_setting)", "def remove_attributes(cube, field, filename):\n cube.attributes = None", "def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)", "def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)", "def remove_attr(self, event: Union[wx.CommandEvent, None],\n attr_id: Union[int, None]) -> None:\n self.attr_req_buttons.pop(attr_id).Destroy()\n self.attr_req_elements.pop(attr_id).Destroy()\n self.attr_req_labels.pop(attr_id).Destroy()\n attr_label = self.attr_req_ids.pop(attr_id)\n if attr_label != '':\n self.attr_requirements[self.element].pop(attr_label)\n if event is not None:\n self._update_attr_list()", "def delete_attributes(self, attribute_list):\n with LayerEditingManager(self.layer, 'Remove attributes', DEBUG):\n # remove attributes\n layer_pr = self.layer.dataProvider()\n print \"REMOVING %s\" % attribute_list\n #TODO fix this\n print \"TODO fix ProcessLayer.delete_attributes()\"\n print \"this attributes should be deleted: %s\" % attribute_list\n #return layer_pr.deleteAttributes(attribute_list)", "def clear(self, attrname):\n self.__dict__['_'+attrname] = False", "def remove_attr(self, key):\n del self.header[key]", "def __delitem__(self, key):\n if not isinstance(key, str) or '.' 
not in key:\n dict.__delitem__(self, key)\n return\n obj, token = _descend(self, key)\n del obj[token]", "def delete_attributes(self, attrs):\r\n assert(isinstance(attrs, list)), \"Argument must be a list of names of keys to delete.\"\r\n self._manager.domain.delete_attributes(self.id, attrs)\r\n self.reload()\r\n return self", "def drop_attr(self, attr_name): # DONE\n self.data.drop(attr_name, axis=1, inplace=True)\n print(self.data)", "def clear_attributes(self):\n self.attrs = etad.AttributeContainer()", "def unregister(self, old):\n if old is not None and old is not Uninitialized:\n try:\n active = self.active.pop(old, None)\n if active is not None:\n for name, type in active:\n getattr(self, type)(old, name, True)\n except TypeError:\n # An error can occur if 'old' is a list or other object for\n # which a weakref cannot be created and used an a key for\n # 'self.active':\n pass", "def delete_fattr(host, fqpath, fattr):\n command = 'setfattr -x %s %s' % (fattr, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('setfattr -x failed: %s' % rerr)\n return False", "def __del__(self):\n \n pass", "def move_arg_attributes(self, arg, old_node, new_node):\n arg.metaattrs[\"deref\"] = new_node.ast.metaattrs[\"deref\"]\n new_node.ast.metaattrs[\"deref\"] = None\n \n c_attrs = new_node.ast.attrs\n attrs = arg.attrs\n for name in [\"owner\", \"free_pattern\"]:\n if c_attrs[name]:\n attrs[name] = c_attrs[name]\n del c_attrs[name]", "def __del__ ( self ) :\n \n if self.name and self.name in self.__pdf_names :\n self.__pdf_names.remove ( self.name ) \n while self.__local_names :\n a = self.__local_names.pop ()\n if a in self.__var_names :\n self.__var_names.remove ( a )", "def clean_object(metadata, analysistype):\n for sample in metadata:\n try:\n delattr(sample[analysistype], \"targetnames\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"targets\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"dnaseq\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"protseq\")\n except AttributeError:\n pass" ]
[ "0.70554286", "0.6918985", "0.6561412", "0.6517371", "0.6463424", "0.63563657", "0.6319125", "0.6317646", "0.6296523", "0.62805814", "0.6253668", "0.62185895", "0.62172204", "0.6147379", "0.6073365", "0.6065975", "0.6024653", "0.6012962", "0.59854305", "0.5978835", "0.59204125", "0.5860359", "0.5814229", "0.5785466", "0.5773502", "0.575627", "0.5755471", "0.56249535", "0.56056577", "0.5604953", "0.56017613", "0.55820704", "0.5580732", "0.55737376", "0.5572918", "0.5548307", "0.552339", "0.551858", "0.54590154", "0.53917414", "0.5376547", "0.5376146", "0.537525", "0.5361161", "0.53586763", "0.53581357", "0.53363276", "0.53285956", "0.5281377", "0.52507544", "0.52507544", "0.52507544", "0.52192944", "0.52112776", "0.52051765", "0.51867795", "0.5171538", "0.51710546", "0.51707804", "0.5163823", "0.51605403", "0.5157271", "0.513164", "0.51229006", "0.5115819", "0.5110208", "0.51067394", "0.5093686", "0.50657964", "0.5058017", "0.5033544", "0.5033544", "0.5033544", "0.5033544", "0.5033544", "0.5033544", "0.5033544", "0.5033544", "0.5033544", "0.5033544", "0.5031936", "0.5029633", "0.50154877", "0.5012997", "0.50037634", "0.50037634", "0.50020784", "0.4983765", "0.49823508", "0.49629852", "0.4938924", "0.49321228", "0.492453", "0.49215636", "0.49164033", "0.49156684", "0.4911622", "0.4909852", "0.49064875", "0.48942688" ]
0.6552601
3
Convert index (slice) to functional expression.
def index_to_expr(self, index): if isinstance(index, ast.Index): return index.value elif isinstance(index, ast.Slice): if index.lower is None and index.step is None: args = [ index.upper ] elif index.step is None: args = [ index.lower, index.upper ] else: args = [ index.lower, index.upper, index.step ] args = [ to_name_constant(None) if arg is None else arg for arg in args ] return to_call(to_name('slice'), args) elif isinstance(index, ast.ExtSlice): indexes = list(map(self.index_to_expr, index.dims)) return ast.Tuple(elts=indexes, ctx=ast.Load()) elif isinstance(index, ast.Tuple): elts = list(map(self.index_to_expr, index.elts)) return ast.Tuple(elts=elts, ctx=ast.Load()) else: return index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, index: Any) -> ColumnOperators:\n return self.operate(getitem, index)", "def special_slice(self, form):\n obj = self.reallyCompile(form[1])\n rest = form[2:]\n if len(rest) == 1:\n return ast.Subscript(obj, 'OP_APPLY', [self.reallyCompile(rest[0])])\n elif len(rest) == 2:\n return ast.Slice(obj, 'OP_APPLY', *self.compileForms(rest))\n elif len(rest) == 3:\n return ast.Subscript(obj, 'OP_APPLY', [ast.Sliceobj(self.compileForms(rest))])\n else:\n raise SyntaxError(\"Too many thingies to slice! %r\" % rest)", "def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]", "def __getitem__(self, index):\n if isinstance(index, slice):\n return Vetor(self.elem[index])\n else:\n return self.elem[index]", "def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")[0], out)", "def apply_slice(*, value : Any, slice : slice) -> Any:\n return value[slice]", "def __getitem__(self, index):\n # type: (int) -> Any\n items = list.__getitem__(self, index)\n return type(self)(self._values(items)) if isinstance(index, slice) else self.value(items)", "def __getitem__(self, idx):\n if not isinstance(idx, (slice, numbers.Integral)):\n raise ValueError('Index indices must be integers')\n if isinstance(idx, slice):\n if idx.step not in (None, 1):\n raise IndexError('Index does not support variable stepping')\n s, e = None, None\n if idx.start is not None:\n s = idx.start\n if s < 0:\n s += len(self)\n s = self.lookup(s)\n if idx.stop is not None:\n e = idx.stop\n if e >= len(self):\n e = None\n else:\n e = self.lookup(e)\n idx = slice(s, e)\n else:\n idx = self.lookup(idx)\n return self.src[idx]", "def map(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = self.end - (-index - 1)\n\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn index - self.start\n\t\telif type(index) is span:\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\tstop = self.map(index.end) + 1\n\t\t\treturn slice(self.map(index.start), None if stop == len(self) else stop)\n\t\telif type(index) is slice:\n\t\t\tstop = self.map(index.stop if index.stop is not None else self.end) + 1\n\t\t\treturn slice(self.map(index.start if index.start is not None else self.start), None if stop == len(self) else stop)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.map(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")", "def _read_index_slice(self, *args, **kwargs): # real signature unknown\n pass", "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def map(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = self.start - (-index - 1)\n\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn self.start - index\n\t\telif type(index) is rspan:\n\t\t\tif self & index != index:\n\t\t\t\traise IndexError(f\"{index!r}: out of 
bounds\")\n\n\t\t\tstop = self.map(index.end) + 1\n\t\t\treturn slice(self.map(index.start), None if stop == len(self) else stop)\n\t\telif type(index) is slice:\n\t\t\tstop = self.map(index.stop if index.stop is not None else self.end) + 1\n\t\t\treturn slice(self.map(index.start if index.start is not None else self.start), None if stop == len(self) else stop)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.map(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")\n\n\t\traise ValueError(f\"{index!r}: bad index\")", "def create(self, index):\n return self._operator_generator(index, -1.0)", "def __getitem__(sliceOrIdentifier):", "def unstacked_index(size, index):\n return index % size, index // size", "def indexer(expression, stream):\n def throw(node, item):\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__,\n item.__class__.__name__,\n )\n )\n\n def mkint(expression):\n if expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n idx = float(expression.children[0])\n if not idx.is_integer():\n idx = int(idx) + 1\n return idx\n else:\n assert False, 'bad number expression {}'.format(\n expression\n )\n\n def mkslice(expression):\n s, e = None, None\n for idx in expression.children:\n if idx.data == 'start':\n s = mkint(idx.children[0])\n elif idx.data == 'end':\n e = mkint(idx.children[0])\n yield slice(s, e)\n\n def mkindex(expression):\n if expression.data == 'expression':\n return evaluate(expression, stream)\n elif expression.data == 'slice':\n return mkslice(expression)\n elif expression.data == 'cname':\n return expression.children\n elif expression.data == 'string':\n return [expression.children[0][1:-1]]\n elif expression.data in ('integer', 'float'):\n return [mkint(expression)]\n else:\n assert False, 'bad index expression {}'.format(expression)\n\n for item in mkindex(expression.children[0]):\n for node in stream:\n if isinstance(node, Object):\n if isinstance(item, Primitive):\n item = str(item)[1:-1]\n if isinstance(item, basestring):\n yield node.get(item, null)\n continue\n\n if isinstance(node, List):\n if isinstance(item, Primitive):\n item = int(str(item))\n if isinstance(item, (int, slice)):\n try:\n yield node[item]\n except IndexError:\n yield null\n continue\n\n if not optional(expression):\n throw(node, item)", "def reconstruct_input(self, ix):", "def _fix_slice(self, inputs, new_attr):\n begin = new_attr.get('begin')\n end = new_attr.get('end')\n axes = new_attr.get('axis', tuple(range(len(begin))))\n slice_op = mx.sym.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])\n if len(axes) > 1:\n for i, axis in enumerate(axes):\n slice_op = mx.sym.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])\n return slice_op", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))", "def __getslice__(self,i,j):\n return self.x[i:j]", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "def select(index, *decorators):\n def wrapped(*args, **kwargs):\n return decorators[int(index)](*args, **kwargs)\n return wrapped", "def expr(self):\n\n args = []\n for i in self.indices:\n args.extend((i.j, i.m))\n return Wigner3j(*args)", "def get_slice(x, indices):\n return x[indices]", "def py__simple_getitem__(self, index):\n if isinstance(index, slice):\n return ValueSet([self])\n else:\n with reraise_getitem_errors(TypeError, KeyError, IndexError):\n node = self.get_tree_entries()[index]\n return 
self._defining_context.infer_node(node)", "def _get_slice(index, axis, num_axes):\n idx = [slice(None)] * num_axes\n idx[axis] = index\n return tuple(idx)", "def __getitem__(self, index):\n if isinstance(index, slice):\n return TokenList(self.token_list[index.start:index.stop:index.step])\n if index < 0: # Handle negative indices.\n index += len(self)\n return self.token_list[index]", "def __getitem__(self, idx):\n if not isinstance(idx, slice):\n return self._fetch()[idx]\n return self._fetch()[idx.start:idx.stop]", "def _slice(tensor, size, i):\n return tensor[:, i * size : (i + 1) * size]", "def _processUnhashableIndex(self, idx):\n from pyomo.core.expr import current as EXPR\n #\n # Iterate through the index and look for slices and constant\n # components\n #\n fixed = {}\n sliced = {}\n ellipsis = None\n _found_numeric = False\n #\n # Setup the slice template (in fixed)\n #\n if normalize_index.flatten:\n idx = normalize_index(idx)\n if idx.__class__ is not tuple:\n idx = (idx,)\n\n for i,val in enumerate(idx):\n if type(val) is slice:\n if val.start is not None or val.stop is not None:\n raise IndexError(\n \"Indexed components can only be indexed with simple \"\n \"slices: start and stop values are not allowed.\")\n if val.step is not None:\n logger.warning(\n \"DEPRECATION WARNING: The special wildcard slice \"\n \"(::0) is deprecated. Please use an ellipsis (...) \"\n \"to indicate '0 or more' indices\")\n val = Ellipsis\n else:\n if ellipsis is None:\n sliced[i] = val\n else:\n sliced[i-len(idx)] = val\n continue\n\n if val is Ellipsis:\n if ellipsis is not None:\n raise IndexError(\n \"Indexed components can only be indexed with simple \"\n \"slices: the Pyomo wildcard slice (Ellipsis; \"\n \"e.g., '...') can only appear once\")\n ellipsis = i\n continue\n\n if hasattr(val, 'is_expression_type'):\n _num_val = val\n # Attempt to retrieve the numeric value .. if this\n # is a template expression generation, then it\n # should raise a TemplateExpressionError\n try:\n val = EXPR.evaluate_expression(val, constant=True)\n _found_numeric = True\n\n except TemplateExpressionError:\n #\n # The index is a template expression, so return the\n # templatized expression.\n #\n from pyomo.core.expr import current as EXPR\n return EXPR.GetItemExpression(tuple(idx), self)\n\n except EXPR.NonConstantExpressionError:\n #\n # The expression contains an unfixed variable\n #\n raise RuntimeError(\n\"\"\"Error retrieving the value of an indexed item %s:\nindex %s is not a constant value. This is likely not what you meant to\ndo, as if you later change the fixed value of the object this lookup\nwill not change. If you understand the implications of using\nnon-constant values, you can get the current value of the object using\nthe value() function.\"\"\" % ( self.name, i ))\n\n except EXPR.FixedExpressionError:\n #\n # The expression contains a fixed variable\n #\n raise RuntimeError(\n\"\"\"Error retrieving the value of an indexed item %s:\nindex %s is a fixed but not constant value. This is likely not what you\nmeant to do, as if you later change the fixed value of the object this\nlookup will not change. 
If you understand the implications of using\nfixed but not constant values, you can get the current value using the\nvalue() function.\"\"\" % ( self.name, i ))\n #\n # There are other ways we could get an exception such as\n # evaluating a Param / Var that is not initialized.\n # These exceptions will continue up the call stack.\n #\n\n # verify that the value is hashable\n hash(val)\n if ellipsis is None:\n fixed[i] = val\n else:\n fixed[i - len(idx)] = val\n\n if sliced or ellipsis is not None:\n return _IndexedComponent_slice(self, fixed, sliced, ellipsis)\n elif _found_numeric:\n if len(idx) == 1:\n return fixed[0]\n else:\n return tuple( fixed[i] for i in range(len(idx)) )\n else:\n raise DeveloperError(\n \"Unknown problem encountered when trying to retrieve \"\n \"index for component %s\" % (self.name,) )", "def __getitem__(self, index):\n def _getTextByIndex(blockIndex):\n return self._doc.findBlockByNumber(blockIndex).text()\n\n if isinstance(index, int):\n index = self._checkAndConvertIndex(index)\n return _getTextByIndex(index)\n elif isinstance(index, slice):\n start, stop, step = index.indices(self._doc.blockCount())\n return [_getTextByIndex(blockIndex) \\\n for blockIndex in range(start, stop, step)]", "def __getitem__(self, item):\n if isinstance(item, slice):\n item = replace_slice_defaults(item)\n\n self.update_rows(item)\n\n return Index(self.expr,\n self.dtype)\n elif isinstance(item, LazyResult):\n if str(item.weld_type) != str(numpy_to_weld_type('bool')):\n raise ValueError('expected LazyResult of bool to filter Index elements')\n\n return Index(weld_filter(self.expr,\n item.expr),\n self.dtype)\n else:\n raise TypeError('expected slice or LazyResult of bool in Index.__getitem__')", "def __getitem__(self, index):\n if isinstance(index, tuple):\n attr, index = index\n if not isinstance(index, slice):\n raise ValueError('When a attribute name is provided, '\n 'the second parameter is expected to be a '\n 'slice range. 
Value given: `{}`'.format(\n index))\n return self.filter(attr, index.start, index.stop, index.step)\n\n if isinstance(index, Iterable):\n item = [self._channels[i] for i in index]\n else:\n item = self._channels[index]\n\n if isinstance(item, Sequence):\n return Grid(item)\n\n return item", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def take_2d_positional(self, index=None, columns=None):\n index = slice(None) if index is None else index\n columns = slice(None) if columns is None else columns\n\n def applyer(df):\n return df.iloc[index, columns]\n\n return DataFrameDefault.register(applyer)(self)", "def convert_index(idx, decomposition, mode='glb_to_loc'):\n if is_integer(idx) or isinstance(idx, slice):\n return decomposition(idx, mode=mode)\n elif isinstance(idx, (tuple, list)):\n return [decomposition(i, mode=mode) for i in idx]\n elif isinstance(idx, np.ndarray):\n return np.vectorize(lambda i: decomposition(i, mode=mode))(idx)\n else:\n raise ValueError(\"Cannot convert index of type `%s` \" % type(idx))", "def __getitem__(self, i):\n if isinstance(i, slice):\n return self.v[slice(i.start+self.tau-1, i.stop+self.tau-1, i.step)]\n else:\n return self.v[i+self.tau-1]", "def f(self, eval_grid, index_data):\n\n # Make sure we have 2-d arrays throughout.\n if len(eval_grid.shape) == 1:\n eval_grid = np.reshape(eval_grid, (len(eval_grid), 1))\n elif len(eval_grid.shape) > 2:\n raise ValueError(eval_grid.shape)\n if len(index_data.shape) == 1:\n index_data = np.reshape(index_data, (len(index_data), 1))\n elif len(index_data.shape) > 2:\n raise ValueError(index_data.shape)\n\n return self.f_s(\n index=eval_grid,\n index_s=index_data,\n leave_one_out_locs=np.array([], dtype=np.int64),\n other_locs=np.arange(len(eval_grid))\n )", "def _convert_col_index(self, index):\n if index is None or isinstance(index, int): return index\n if isinstance(index, str):\n find_index = self._varlist.index\n return [find_index(v) for v in self._find_vars(index)]\n if isinstance(index, collections.Iterable):\n new_index = []\n append = new_index.append\n find_vars = self._find_vars\n find_index = self._varlist.index\n for i in index:\n if isinstance(i, str):\n new_index += [find_index(i) for i in find_vars(i)]\n elif isinstance(i, int):\n append(i)\n else:\n msg = \"column iterable should contain only int or str\"\n raise TypeError(msg)\n if len(new_index) != len(set(new_index)):\n msg = \"columns cannot be repeated; use -clonevar- to copy\"\n raise ValueError(msg)\n return new_index\n if isinstance(index, slice):\n start, stop, step = index.start, index.stop, index.step\n if not isinstance(start, int) and start is not None:\n if isinstance(start, str):\n start = self._varlist.index(self._find_vars(start)[0])\n else:\n raise TypeError(\"column slice values must be str or int\")\n if not isinstance(stop, int) and stop is not None:\n if isinstance(stop, str):\n stop = self._varlist.index(self._find_vars(stop)[0])\n else:\n raise TypeError(\"column slice values must be str or int\")\n return slice(start, stop, step)\n msg = \"column should be index (int), name (str), slice, or iterable\"\n raise TypeError(msg)", "def slice_layer(start, end, step=None, axis=1):\n if axis < 0:\n raise ValueError(\"'slice_layer' can only work on a specified axis > 0\")\n\n def slice_func(x):\n slices = 
[slice(None)] * axis\n slices.append(slice(start, end, step))\n return x[tuple(slices)]\n\n return Lambda(slice_func)", "def __getitem__(self, index):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._getitem2d(index)\n elif self.dim == 3:\n return self._getitem3d(index)", "def structure_function(f, index=0):\n\n def structured_function(*args):\n pattern = args[index]\n evaluated = f(*args)\n evaluated[pattern == 0] = 0\n return evaluated\n\n return structured_function", "def __new__(cls, index: int) -> Expr:\n return Expr.__new__(cls)", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def visit_Subscript(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n args = [ node.value, self.index_to_expr(node.slice) ]\n return to_call(to_attribute(self.operator, 'getitem'), args)\n return node", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n return [self._main[key] for key in self._main._sequence[index]]\n else:\n return self._main[self._main._sequence[index]]", "def structure_function(f, index=0):\r\n\r\n def structured_function(*args):\r\n pattern = args[index]\r\n evaluated = f(*args)\r\n evaluated[pattern == 0] = 0\r\n return evaluated\r\n return structured_function", "def _evaluate(self, index):\n raise NotImplementedError", "def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)", "def index(x, axis, index_spec):\n idx = [slice(None)] * x.ndim\n idx[axis] = index_spec\n\n indexer = tuple(idx)\n return indexer", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor is not None:\n return torch.index_select(self.expr_tensor, dim=1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(1)\n else:\n return torch.index_select(self.expr_transposed_tensor, dim=-1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(-1)", "def GenerateSpecialFunction(n):\n return eval('lambda a: %s' % GenerateSpecialExpression(n))", "def _range_to_slice(index):\n if not len(index):\n return slice(None, 0, None)\n if any(i < 0 for i in index):\n raise ValueError(f'Could not convert {index} to a slice '\n '(contains negative elements)')\n increments_left = set(np.diff(index))\n step = increments_left.pop() if increments_left else 1\n if step == 0 or increments_left:\n raise ValueError(f'Could not convert {index} to a slice '\n '(unevenly spaced or zero increments)')\n start = index[0]\n stop = index[-1] + step\n # Avoid descending below 0 and thereby wrapping back to the top\n return slice(start, stop if stop >= 0 else None, step)", "def part(expr,address):\n for num in address:\n expr = expr.args[num]\n return expr", "def gen_array_index(self, expr: expressions.ArrayIndex):\n # Load base as an rvalue, to make sure we load pointers values.\n base = self.gen_expr(expr.base, rvalue=True)\n index = self.gen_expr(expr.index, rvalue=True)\n\n # Calculate offset:\n element_size = self.sizeof(expr.base.typ.element_type)\n index = self.builder.emit_cast(index, ir.ptr)\n offset = 
self.builder.emit_mul(index, element_size, ir.ptr)\n\n # Calculate address:\n return self.builder.emit_add(base, offset, ir.ptr)", "def index_stmt(self, idx):\n return Statement(\"index\", self, idx)", "def __getslice__(self, i, j):\n return self.dtrs[i:j]", "def ind2sub(index,dims):\n subs = []\n ii = 0\n for y in range(dims[1]):\n for x in range(dims[0]):\n if index==ii:\n subs = [x,y]\n ii +=1\n return subs", "def partial ( index , func , x , h = 0 , I = 2 , err = False ) :\n \n if len(x) <= index :\n raise AttributeError(\"Invalid argument length/index %d/%d\" % ( len(x) , index ) )\n \n _x = [ float(a) for a in x ]\n \n ## create wrapper function \n def _wrap ( z ) :\n _z = _x[index] \n _x[index] = z\n _r = func ( *_x )\n _x[index] = _z\n return _r\n \n x_i = _x[ index ]\n return derivative ( _wrap , x = x_i , h = h , I = I , err = err )", "def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])", "def vecToFunc(vector):\n def f(x):\n f = 0\n for i in range(len(vector)):\n f += vector[i]*x**i\n return f\n return f", "def rmap(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = len(self) - (-index - 1)\n\n\t\t\tresult = index + self.start\n\t\t\tif self & result != result:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn result\n\t\telif type(index) is slice:\n\t\t\treturn slice(\n\t\t\t\tself.rmap(index.start if index.start is not None else self.start),\n\t\t\t\tself.rmap((index.stop if index.stop is not None else len(self)) - 1))\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.rmap(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")\n\n\t\traise ValueError(f\"{index!r}: bad index\")", "def __getitem__(self, idx):\n if isinstance(idx, slice):\n # Insert extreme values if none are specified\n start = 0 if idx.start is None else idx.start\n stop = self._length if idx.stop is None else idx.stop\n step = 1 if idx.step is None or idx.step == 0 else idx.step\n\n # Convert any negative values to positive counterparts\n if start < 0:\n start = self._convert_negative_index(start)\n if stop < 0:\n stop = self._convert_negative_index(stop)\n if step < 1: # Need to flip the start and stop values\n start, stop = stop - 1, start - 1\n\n # Return a new array with the values specified by the slice\n slice_arr = DynamicArray(self._growth_factor)\n for i in range(start, stop, step):\n slice_arr.append(self._arr[i])\n return slice_arr\n\n else: # Integer index\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n if 0 <= idx < self._length: # Check if index is within bounds\n return self._arr[idx]\n raise IndexError(\"Index out of bounds\")", "def __getitem__(self, index: slice) -> List:\n\n return self.data[index]", "def index(self, x) -> int:\n pass", "def sliced_fun(f, n_slices):\n\n def sliced_f(sliced_inputs, non_sliced_inputs=None):\n if non_sliced_inputs is None:\n non_sliced_inputs = []\n if isinstance(non_sliced_inputs, tuple):\n non_sliced_inputs = list(non_sliced_inputs)\n n_paths = len(sliced_inputs[0])\n slice_size = max(1, n_paths // n_slices)\n ret_vals = None\n for start in range(0, n_paths, slice_size):\n inputs_slice = [v[start:start + slice_size] for v in sliced_inputs]\n slice_ret_vals = f(*(inputs_slice + non_sliced_inputs))\n if not isinstance(slice_ret_vals, (tuple, list)):\n slice_ret_vals_as_list = [slice_ret_vals]\n else:\n slice_ret_vals_as_list = slice_ret_vals\n scaled_ret_vals = [\n np.asarray(v) * 
len(inputs_slice[0])\n for v in slice_ret_vals_as_list\n ]\n if ret_vals is None:\n ret_vals = scaled_ret_vals\n else:\n ret_vals = [x + y for x, y in zip(ret_vals, scaled_ret_vals)]\n ret_vals = [v / n_paths for v in ret_vals]\n if not isinstance(slice_ret_vals, (tuple, list)):\n ret_vals = ret_vals[0]\n elif isinstance(slice_ret_vals, tuple):\n ret_vals = tuple(ret_vals)\n return ret_vals\n\n return sliced_f", "def index(self, arr, idx, temp = True, name = None):\n \n temp = temp or name is not None\n \n arr_t = arr.type\n\n if isinstance(arr_t, ScalarT):\n # even though it's not correct externally, it's\n # often more convenient to treat indexing\n # into scalars as the identity function.\n # Just be sure to catch this as an error in\n # the user's code earlier in the pipeline.\n return arr\n if isinstance(arr_t, TupleT):\n if isinstance(idx, Const):\n idx = idx.value\n\n assert isinstance(idx, int), \\\n \"Index into tuple must be an integer, got %s\" % idx\n if isinstance(idx, Const):\n idx = idx.value\n proj = self.tuple_proj(arr, idx)\n if temp:\n return self.assign_temp(proj, \"tuple_elt%d\" % idx if name is None else name)\n else:\n return proj\n\n if self.is_tuple(idx):\n indices = self.tuple_elts(idx)\n elif hasattr(idx, '__iter__'):\n indices = tuple(map(wrap_if_constant,idx))\n else:\n indices = (wrap_if_constant(idx),)\n\n n_required = arr_t.rank\n n_indices = len(indices)\n if n_indices < n_required:\n # all unspecified dimensions are considered fully sliced\n extra = (syntax_helpers.slice_none,) * (n_required - n_indices)\n indices = indices + extra\n\n if len(indices) > 1:\n idx = self.tuple(indices, \"index_tuple\" if name is None else name)\n else:\n idx = indices[0]\n\n t = arr_t.index_type(idx.type)\n idx_expr = Index(arr, idx, type=t)\n if temp:\n return self.assign_temp(idx_expr, \"array_elt\" if name is None else name)\n else:\n return idx_expr", "def ast_to_blitz_expr(ast_seq):\n # Don't overwrite orignal sequence in call to transform slices.\n ast_seq = copy.deepcopy(ast_seq)\n slice_handler.transform_slices(ast_seq)\n\n # Build the actual program statement from ast_seq\n expr = ast_tools.ast_to_string(ast_seq)\n\n # Now find and replace specific symbols to convert this to\n # a blitz++ compatible statement.\n # I'm doing this with string replacement here. It could\n # also be done on the actual ast tree (and probably should from\n # a purest standpoint...).\n\n # this one isn't necessary but it helps code readability\n # and compactness. It requires that\n # Range _all = blitz::Range::all();\n # be included in the generated code.\n # These could all alternatively be done to the ast in\n # build_slice_atom()\n expr = expr.replace('slice(_beg,_end)', '_all')\n expr = expr.replace('slice', 'blitz::Range')\n expr = expr.replace('[','(')\n expr = expr.replace(']', ')')\n expr = expr.replace('_stp', '1')\n\n # Instead of blitz::fromStart and blitz::toEnd. 
This requires\n # the following in the generated code.\n # Range _beg = blitz::fromStart;\n # Range _end = blitz::toEnd;\n #expr = expr.replace('_beg', 'blitz::fromStart' )\n #expr = expr.replace('_end', 'blitz::toEnd' )\n\n return expr + ';\\n'", "def up_index(index):\n return 2 * index", "def crop(dimension, start, end):\n def func(x):\n if dimension == 0:\n return x[start: end]\n if dimension == 1:\n return x[:, start: end]\n if dimension == 2:\n return x[:, :, start: end]\n if dimension == 3:\n return x[:, :, :, start: end]\n if dimension == 4:\n return x[:, :, :, :, start: end]\n return Lambda(func)", "def test_directly_indexed_expression(self, fa, ti0, t0, exprs):\n eqs = EVAL(exprs, ti0.base, t0)\n op = Operator(eqs, dse='noop', dle='noop')\n trees = retrieve_iteration_tree(op)\n assert len(trees) == 2\n assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs\n assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs", "def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def reconstruction(B, N):\n\n def _(f):\n return lambda *x: sum(f[i] * B(i)(*x) for i in range(N))\n\n return _", "def bar(expression, index, return_dict):\n return_dict[index] = factor(expression)\n print(index)", "def _operator_generator(self, index, conj):\n pterm = PauliTerm('I', 0, 1.0)\n Zstring = PauliTerm('I', 0, 1.0)\n for j in range(index):\n Zstring = Zstring*PauliTerm('Z', j, 1.0)\n\n pterm1 = Zstring*PauliTerm('X', index, 0.5)\n scalar = 0.5 * conj * 1.0j\n pterm2 = Zstring*PauliTerm('Y', index, scalar)\n pterm = pterm * (pterm1 + pterm2)\n\n pterm = pterm.simplify()\n return pterm", "def __getitem__(self,cle):\n return self.F(*cle)", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor:\n return dy.pick(self.expr_tensor, key, dim=len(self.expr_tensor.dim()[0])-1)\n else:\n return dy.pick(self.expr_transposed_tensor, key, dim=0)", "def inverted(self, index):\n interval = self.interval(index)\n return index + interval", "def __init__(self, function, integer_variable_indices, operator=np.floor, copy_arg=True):\n ComposedFunction.__init__(self, [function, self._flatten])\n self.integer_variable_indices = integer_variable_indices\n self.operator = operator\n self.copy_arg = copy_arg", "def replace_function(self, pfunc, index = -1):\n raise NotImplementedError()", "def rmap(self, index):\n\n\t\tif type(index) is int:\n\t\t\tif index < 0:\n\t\t\t\tindex = len(self) - (-index - 1)\n\n\t\t\tresult = self.start - index\n\t\t\tif self & result != result:\n\t\t\t\traise IndexError(f\"{index!r}: out of bounds\")\n\n\t\t\treturn result\n\t\telif type(index) is slice:\n\t\t\treturn slice(\n\t\t\t\tself.rmap(index.start if index.start is not None else self.start),\n\t\t\t\tself.rmap((index.stop if index.stop is not None else len(self)) - 1))\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn self.rmap(index.__index__())\n\t\t\texcept:\n\t\t\t\traise ValueError(f\"{index!r}: bad index\")\n\n\t\traise ValueError(f\"{index!r}: bad index\")", "def 
index_to_feature(p, dims):\n feature = []\n for dim in dims:\n feature.append(p % dim)\n p //= dim\n return feature", "def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret", "def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret", "def test_lambda(n):\n return [lambda v=i: v for i in range(n)]", "def reverse_slice(n):\n return n[::-1]", "def eval_key(self, index):\n return self._EVAL_PREFIX + str(index)", "def __getitem__(self,k):\n if type(k) is IntType: return self.data[k, 0]\n \n vec = [type(x) is SliceType for x in k]\n \n if True in vec: #suppose only one slice\n ii=vec.index(True)\n indices=[]\n k = list(k)\n import numpy\n rep = numpy.zeros((self.dims[ii],), 'd')\n for i in range(self.dims[ii]):\n k[ii] = i\n rep[i] = self.data[self.comp(k), 0]\n return rep\n else:\n return self.data[self.comp(k), 0]", "def advanced_indexing_op(input, index):\n batch_size = tf.shape(input)[0]\n max_length = int(input.get_shape()[1])\n dim_size = int(input.get_shape()[2])\n index = tf.range(0, batch_size) * max_length + (index - 1)\n flat = tf.reshape(input, [-1, dim_size])\n relevant = tf.gather(flat, index)\n return relevant", "def evaluate_as_vector(self, chain_state): \n def vector_representation(n, ordering, it):\n return self.mapping.subspace(zip(ordering,it))\n return self._evaluate(vector_representation, chain_state)", "def index2d(src, idx):\n broadcast_to = P.BroadcastTo(idx.shape)\n offs = broadcast_to(P.range(Tensor(0, mindspore.int32),\n Tensor(idx.shape[0], mindspore.int32),\n Tensor(1, mindspore.int32))[:, None])\n idx = idx + (offs()) * idx.shape[1]\n\n return src.view(-1)[idx.view(-1)].view(idx.shpe)", "def _state_space_index_to_features(self, index):\n factor = []\n for base in self.factor_bases:\n f = math.floor(index/base)\n factor.append(f)\n index = index - f*base\n \n return factor", "def __getitem__(self, key):\n if isinstance(key, slice):\n return [self._to_document(x) for x in self.query[key]]\n elif isinstance(key, int):\n return self._to_document(self.query[key])\n else:\n raise TypeError(\"Indices must be integers or slices!\")", "def __getslice__( self, *args):\n return array.array.__getslice__(self, *args).tostring()", "def index(i, j):\n return i * N + j", "def offset_edge_index(\n node_slices: Dict[NodeType, Tuple[int, int]],\n edge_type: EdgeType,\n edge_index: Tensor,\n) -> Tensor:\n src, _, dst = edge_type\n offset = [[node_slices[src][0]], [node_slices[dst][0]]]\n offset = torch.tensor(offset, device=edge_index.device)\n return edge_index + offset", "def __getitem__(self, index):\n return self.data[index[0] - 1][index[1] - 1]", "def __getitem__(self, idx):\n return self.getitem(idx)", "def __getitem__(self, ind):\n try:\n\n if isinstance(ind, slice):\n if ind.start is None:\n start = self.increments\n else:\n start 
= ind.start + self.increments\n\n if ind.stop is not None:\n stop = ind.stop + self.increments\n\n ind = slice(start, stop)\n else:\n ind += self.increments\n\n return self.data.iloc[ind,:]\n\n except IndexError:\n warning('DataEngine: Index out of bounds')\n return None" ]
[ "0.6217184", "0.5981041", "0.5913932", "0.5875691", "0.57255584", "0.56947947", "0.55419147", "0.5474115", "0.5463078", "0.5445567", "0.5418668", "0.53971356", "0.5378227", "0.53756636", "0.5320697", "0.53108877", "0.5268785", "0.52610666", "0.52600414", "0.5241331", "0.5241322", "0.52279806", "0.52236474", "0.52119666", "0.5209811", "0.5186041", "0.51789296", "0.51757604", "0.5171783", "0.5171351", "0.514951", "0.5148782", "0.5141527", "0.5111797", "0.5107519", "0.5100694", "0.50968385", "0.5090872", "0.5084313", "0.507669", "0.50759417", "0.5070808", "0.50377333", "0.50347286", "0.50330603", "0.5025388", "0.5015507", "0.49925336", "0.49914613", "0.49690837", "0.4954935", "0.49309328", "0.4917943", "0.49040595", "0.4903141", "0.4899219", "0.4884327", "0.48674768", "0.48654985", "0.48635992", "0.48624888", "0.48611304", "0.48456016", "0.4841455", "0.48400214", "0.48395488", "0.4821053", "0.48092952", "0.48032442", "0.4797214", "0.47959492", "0.47862002", "0.47832263", "0.47813445", "0.47805884", "0.4780448", "0.4778606", "0.47784272", "0.47777605", "0.47773346", "0.47713354", "0.476775", "0.4767571", "0.47527996", "0.47527996", "0.4748063", "0.47450247", "0.47432566", "0.47431692", "0.4742113", "0.47418267", "0.47408888", "0.47405225", "0.4736763", "0.47359455", "0.4735087", "0.47320664", "0.47295383", "0.47098058", "0.4703356" ]
0.7314768
0
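The record above pairs the docstring "Convert index (slice) to functional expression." with an `index_to_expr` helper that rewrites AST slice syntax into explicit `slice(...)` calls and tuples. The following is a minimal standard-library sketch of the runtime equivalence that rewrite relies on; it is illustrative only and is not code from the record's source project.

```python
import operator

data = list(range(10))

# data[2:8:2] is sugar for indexing with an explicit slice object,
# which is exactly what the rewritten expression evaluates to.
assert data[2:8:2] == operator.getitem(data, slice(2, 8, 2))

# Omitted bounds become None arguments, mirroring the None placeholders
# the helper inserts via to_name_constant(None).
assert data[:5] == operator.getitem(data, slice(None, 5, None))

# Extended indexing such as m[1:3, 0] passes a tuple key; the rewrite
# builds the same shape with ast.Tuple.
key = (slice(1, 3), 0)
assert isinstance(key, tuple) and isinstance(key[0], slice)
```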
Convert indexing to `getitem` call.
def visit_Subscript(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): args = [ node.value, self.index_to_expr(node.slice) ] return to_call(to_attribute(self.operator, 'getitem'), args) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, idx):\n if not isinstance(idx, slice):\n return self._fetch()[idx]\n return self._fetch()[idx.start:idx.stop]", "def __getitem__(self, idx):\n return self.getitem(idx)", "def __getitem__(self, index):\n # type: (int) -> Any\n items = list.__getitem__(self, index)\n return type(self)(self._values(items)) if isinstance(index, slice) else self.value(items)", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n return [self._main[key] for key in self._main._sequence[index]]\n else:\n return self._main[self._main._sequence[index]]", "def __getitem__(self, idx):\n pass", "def __getitem__(self, idx):\n pass", "def __getitem__(self, index):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._getitem2d(index)\n elif self.dim == 3:\n return self._getitem3d(index)", "def __getitem__(self, idx):\n if not isinstance(idx, (slice, numbers.Integral)):\n raise ValueError('Index indices must be integers')\n if isinstance(idx, slice):\n if idx.step not in (None, 1):\n raise IndexError('Index does not support variable stepping')\n s, e = None, None\n if idx.start is not None:\n s = idx.start\n if s < 0:\n s += len(self)\n s = self.lookup(s)\n if idx.stop is not None:\n e = idx.stop\n if e >= len(self):\n e = None\n else:\n e = self.lookup(e)\n idx = slice(s, e)\n else:\n idx = self.lookup(idx)\n return self.src[idx]", "def __getitem__(self, index):\n raise NotImplementedError", "def __getitem__(self, index):\n raise NotImplementedError", "def __getitem__(self, index):\n pass", "def __getitem__(self, index):\n pass", "def __getitem__ (self, index):\n pass", "def __getitem__(self, index):\n raise NotImplementedError", "def __getitem__(sliceOrIdentifier):", "def __getitem__(self, idx):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n if nidx >= len(self.data):\n raise IndexError\n return self.data[nidx]", "def __getitem__(self,index):\n return self._data[index[0]][index[1]]", "def __getitem__(self, idx):\n return self.GetArray(idx)", "def __getitem__(self, idx):\n return self.GetArray(idx)", "def __getitem__(self, index: Any) -> ColumnOperators:\n return self.operate(getitem, index)", "def __getitem__ ( self , index ):\n\t\treturn self . 
data [ index ]", "def __getitem__(self, index: int) -> object:\n return self.get_at_index(index)", "def __getitem__(self, index):\n return self.to_list()[index]", "def __getitem__(self, inds):\n i, j = inds\n return self.array[i][j]", "def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]", "def __getitem__(self, idx):\n return self._data[idx]", "def __getitem__(self, index: int) -> T:\n pass", "def __getitem__(self, index):\n\t\treturn self.data[index]", "def __getitem__(self, index):\n return self._value_at(index)", "def __getitem__(self, index):\n return self._value_at(index)", "def __getitem__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __getitem__(self, idx):\n if idx < 0 or idx >= self.length():\n raise KeyError()\n return self.data[idx]", "def __getitem__ (self, idx):\n return self.row(idx[0])[idx[1]]", "def __getitem__(self, index):\n if isinstance(index, int):\n return list.__getitem__(self, index)\n if isinstance(index, tuple):\n return list.__getitem__(self, index[0])[index[1]]\n raise TypeError, \"Table indices must be int or tuple\"", "def __getitem__(self, idx):\n if len(idx) == 1:\n return self.rows[idx[0]]\n else:\n return self.rows[idx[0]][idx[1]]", "def __getitem__(self, index):\n item = self.data[index]\n return item", "def __getitem__(self, index):\n return index, super().__getitem__(index)", "def __getitem__(self, idx):\n return self.items[idx]", "def __getitem__(self, ind):\n if not isinstance(ind, (str, unicode)):\n raise TypeError('Supply a valid str for the index')\n if self.indices[0] == ind:\n return self.x\n if self.indices[1] == ind:\n return self.y\n if self.indices[2] == ind:\n return self.z\n else:\n raise ValueError('Not a defined index')", "def __getitem__(self, index):\n return self.data[index]", "def __getitem__(self, index):\n return self.data[index]", "def __getitem__(self, index):\n if self._list_like(index):\n len_var = len(index)\n if len_var==0:\n raise IndexError(\"Received empty index.\")\n elif len_var==1:\n return self._points[index[0]]\n elif len_var==2:\n return self._points[index[0]][index[1]]\n else:\n raise IndexError(\"Received too long index.\")\n return self._points[index]", "def __getitem__(self, *args):\n return self.data.__getitem__(*args)", "def __getitem__(self, index):\n try:\n if isinstance(index, int):\n # the only reliable way is to iterate up to the index:\n return next(islice(self, index, None))\n if isinstance(index, slice):\n return list(islice(self, index.start, index.stop, index.step))\n else:\n key_return = list(self._dictitem_gen(index))\n if self.KEY_ACCESS_REDUCE_SINGLETONS and len(key_return) == 1:\n return key_return[0]\n else:\n return key_return\n except StopIteration:\n raise IndexError(\"list index out of range\")", "def __getitem__(self, index):\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n raise IndexError", "def __getitem__(self, ids):\n if isinstance(ids, tuple):\n return self.__getnum__(ids[0], ids[1])\n\n elif isinstance(ids, int):\n return self.__getpos__(ids)\n\n else:\n raise RuntimeError(\"No such indexing exist\")", "def py__simple_getitem__(self, index):\n if isinstance(index, slice):\n return ValueSet([self])\n else:\n with reraise_getitem_errors(TypeError, KeyError, IndexError):\n node = self.get_tree_entries()[index]\n return self._defining_context.infer_node(node)", "def __getitem__(self, idx):\n # if key is slice, return a new HSP 
instance\n if isinstance(idx, slice):\n obj = self.__class__(self._items[idx])\n self._transfer_attrs(obj)\n return obj\n return self._items[idx]", "def __getitem__(self, index):\n if self._constructed is False:\n self._not_constructed_error(index)\n\n try:\n obj = self._data.get(index, _NotFound)\n except TypeError:\n try:\n index = self._processUnhashableIndex(index)\n except TypeError:\n # This index is really unhashable. Set a flag so that\n # we can re-raise the original exception (not this one)\n index = TypeError\n if index is TypeError:\n raise\n if index.__class__ is _IndexedComponent_slice:\n return index\n # The index could have contained constant but nonhashable\n # objects (e.g., scalar immutable Params).\n # _processUnhashableIndex will evaluate those constants, so\n # if it made any changes to the index, we need to re-check\n # the _data dict for membership.\n try:\n obj = self._data.get(index, _NotFound)\n except TypeError:\n obj = _NotFound\n\n if obj is _NotFound:\n # Not good: we have to defer this import to now\n # due to circular imports (expr imports _VarData\n # imports indexed_component, but we need expr\n # here\n from pyomo.core.expr import current as EXPR\n if index.__class__ is EXPR.GetItemExpression:\n return index\n validated_index = self._validate_index(index)\n if validated_index is not index:\n index = validated_index\n # _processUnhashableIndex could have found a slice, or\n # _validate could have found an Ellipsis and returned a\n # slicer\n if index.__class__ is _IndexedComponent_slice:\n return index\n obj = self._data.get(index, _NotFound)\n #\n # Call the _getitem_when_not_present helper to retrieve/return\n # the default value\n #\n if obj is _NotFound:\n return self._getitem_when_not_present(index)\n\n return obj", "def __getitem__(self, index):\n if isinstance(index, slice):\n return Vetor(self.elem[index])\n else:\n return self.elem[index]", "def __getitem__(self, key):\n if isinstance(key, slice):\n return [self._to_document(x) for x in self.query[key]]\n elif isinstance(key, int):\n return self._to_document(self.query[key])\n else:\n raise TypeError(\"Indices must be integers or slices!\")", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n # fetching a slice returns an OrderedDict\n return self._main[index].items()\n key = self._main._sequence[index]\n return (key, self._main[key])", "def __getitem__(self, index):\n return self.array[index]", "def __getitem__(self, index):\n return self._terms[index]", "def __getitem__(self, keys):\n if isinstance(keys, int):\n return super().__getitem__(keys)\n elif isinstance(keys, list):\n return self.vectors(keys)\n elif isinstance(keys, str):\n return self.find(keys)\n elif isinstance(keys, tuple):\n return self.item(keys[0], keys[1])\n elif isinstance(keys, slice):\n return super().__getitem__(keys)", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else: return dy.pick(self.expr_tensor, key)", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __getitem__(self, index):\n\n if self._data_indices is not None:\n index = self._data_indices[index]\n data = self._dataset[index]\n return data", "def __getitem__(self,idx):\n try:\n return self._cache[idx]\n except:\n pass\n\n try:\n # return full data entry as list\n out = self._data[idx]\n self._cache[idx] = out\n return out\n except:\n try:\n # return data entry with specified key word\n out = 
self._data[idx[0]][self._header[self._alias[idx[1]]]]\n self._cache[idx] = out\n return out\n except:\n pass", "def __getitem__(self, index):\n return getattr(self, self.__slots__[index])", "def __getitem__(self, index):\n # NOTE: this automatically supports slicing :-)\n return self._main._sequence[index]", "def __getitem__(self, index):\n return self.values[index]", "def __getitem__(self, index):\n if index < 0:\n raise Exception(\"index can't be negative\")\n if index < self.cycle_begin:\n return self.index_to_result[index]\n cycle_offset = (index - self.cycle_begin) % self.cycle_length\n return self.index_to_result[self.cycle_begin + cycle_offset]", "def __getitem__(self, key):\n # Both row index and columns given\n if isinstance(key, tuple):\n index, column = key\n index = self._slice_index(index) if isinstance(index, slice) else index\n return self.get(indexes=index, columns=column, as_list=True)\n # Row indexed with slice, all columns\n elif isinstance(key, slice):\n return self.get(indexes=self._slice_index(key), as_list=True)\n # Single row\n else:\n return self.get(indexes=key, as_list=True)", "def __getitem__(self, index):\n if not isinstance(index, tuple) or not 1 <= len(index) <= 2:\n msg = \"data subscripting must be [rows,cols] or [rows,]\"\n raise ValueError(msg)\n sel_rows = self._check_index(self._nobs, index[0])\n sel_cols = (self._convert_col_index(index[1]) \n if len(index) == 2 else None)\n sel_cols = self._check_index(self._nvar, sel_cols)\n # call instance constructor\n return self.__class__(self, sel_rows, sel_cols)", "def __getitem__(self, index):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # ask my tile do the rest\n value = self.data[self.tile.offset(index)]\n # otherwise\n else:\n # retrieve the item directly from my container\n value = self.data[index]\n # all done\n return value", "def __getitem__(self, index):\n if self.valid_index(index):\n return self._data[index]\n else:\n return IndexError", "def __getitem__(self, index):\r\n return self._items[index]", "def __getitem__(self, index):\n return self.position[index]", "def __getitem__(self, item_index: Index) -> Item:\n raise NotImplementedError(\"__getitem__\")", "def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])", "def __getitem__(self, index):\n try:\n i, j = index\n except (AssertionError, TypeError):\n raise IndexError('invalid index')\n\n if not np.isscalar(i) and np.isscalar(j):\n warn('Indexing into a lil_matrix with multiple indices is slow. 
'\n 'Pre-converting to CSC or CSR beforehand is more efficient.',\n SparseEfficiencyWarning)\n\n if np.isscalar(i):\n if np.isscalar(j):\n return self._get1(i, j)\n if isinstance(j, slice):\n j = self._slicetoseq(j, self.shape[1])\n if issequence(j):\n return self.__class__([[self._get1(i, jj) for jj in j]])\n elif issequence(i) and issequence(j):\n return self.__class__([[self._get1(ii, jj) for (ii, jj) in zip(i, j)]])\n elif issequence(i) or isinstance(i, slice):\n if isinstance(i, slice):\n i = self._slicetoseq(i, self.shape[0])\n if np.isscalar(j):\n return self.__class__([[self._get1(ii, j)] for ii in i])\n if isinstance(j, slice):\n j = self._slicetoseq(j, self.shape[1])\n if issequence(j):\n return self.__class__([[self._get1(ii, jj) for jj in j] for ii in i])\n else:\n raise IndexError", "def __getitem__(self, idx):\n\t\t# String - single channel\n\t\tif isinstance(idx, basestring):\n\t\t\treturn self.data[idx]\n\n\t\t# List - ambiguous, need to check contents\n\t\telif isinstance(idx, list):\n\n\t\t\t# Of strings - subset of channels\n\t\t\tif isinstance(idx[0], basestring):\n\t\t\t\treturn self.data[idx]\n\n\t\t\t# Otherwise assume ints or bools to index rows\n\t\t\telse:\n\t\t\t\treturn self.data.iloc[idx]\n\n\t\t# Tuple - rows and channels\n\t\telif isinstance(idx, tuple):\n\n\t\t\trows, channels = idx\n\n\t\t\t# Get correct channels first\n\t\t\tif isinstance(channels, basestring): # Channel name\n\t\t\t\tdf = self.data[channels]\n\t\t\telif isinstance(channels, (int, long)): # Channel index\n\t\t\t\tdf = self.data[self._channels[channels]]\n\t\t\telif isinstance(channels, list):\n\t\t\t\tif isinstance(channels[0], basestring): # List of names\n\t\t\t\t\tdf = self.data[channels]\n\t\t\t\telif isinstance(channels[0], (int, long)): # List of indices\n\t\t\t\t\tdf = self.data[[self._channels[i] for i in channels]]\n\t\t\t\telse:\n\t\t\t\t\traise TypeError(\n\t\t\t\t\t\t'Second index must contain channel names or '\n\t\t\t\t\t\t'positions')\n\t\t\telif channels is None:\n\t\t\t\tdf = self.data\n\t\t\telse:\n\t\t\t\traise TypeError(\n\t\t\t\t\t'Second index must contain channel names or positions')\n\n\t\t\t# Now index rows\n\t\t\tif rows is None:\n\t\t\t\treturn df\n\t\t\telse:\n\t\t\t\treturn df.iloc[rows]\n\n\t\t# Other - assume int, pandas.Series or numpy.ndarray\n\t\telse:\n\t\t\treturn self.data.iloc[idx]", "def __getitem__(self, index):\n # check whether the requested index is available.\n # raise an error if not\n # [BUG] ? 
self._nrows-1: -> self._nrows:\n if index > self._nrows-1:\n err_msg = 'Index: '+str(index)+' is larger than nrows: '\\\n +str(self._nrows)+'!!'\n raise Exception(err_msg)\n\n # return the value at the index\n return self._data[index]", "def __getitem__(self,idx):\n return self.g[idx]", "def __getitem__(self, index):\n return self._nums[index]", "def __getitem__(self, idx):\r\n if self.is_superset:\r\n for ds in self.data:\r\n if idx >= len(ds):\r\n continue\r\n return ds[idx]\r\n else:\r\n return self.data[idx]", "def __getitem__(self, index):\n return self.dataset[index]", "def __getitem__(self, idx):\n return self.data.iloc[idx]", "def __getitem__(self, key):\n return self()[key]", "def __getitem__(self, idx):\n tp = type(idx)\n if tp == list:\n # Return list corresponding to lis of indices #\n answer = []\n for i in idx:\n answer.append(self[i])\n return answer\n #\n elif tp == int:\n # Look for CrossSection with this mt number #\n for xs in self.cross_sections:\n if xs.mt == idx:\n return xs\n #\n raise IndexError(\n 'mt = %s not found in PsedoExperiment instance.' % str(idx))\n elif tp == str:\n # Look for CrossSection with this reaction type #\n for xs in self.cross_sections:\n if xs.sammy_type[:max(3,len(idx))] == idx:\n return xs\n #\n raise IndexError(\n 'xs type = %s not found in PsedoExperiment instance.' % idx)\n else:\n # If idx not of any of the above types:\n raise ValueError('%s type not allowed for indexing.' % str(tp))\n #", "def __getitem__(self, ind):\n try:\n\n if isinstance(ind, slice):\n if ind.start is None:\n start = self.increments\n else:\n start = ind.start + self.increments\n\n if ind.stop is not None:\n stop = ind.stop + self.increments\n\n ind = slice(start, stop)\n else:\n ind += self.increments\n\n return self.data.iloc[ind,:]\n\n except IndexError:\n warning('DataEngine: Index out of bounds')\n return None", "def relay_tuple_getitem(c, t, idx):\n assert idx.is_constant(int)\n return relay.expr.TupleGetItem(c.ref(t), idx.value)", "def __getitem__(self, index):\n return self.seq[index]", "def __getitem__(self, ndx):\n if type(ndx) is slice:\n return list(islice(self._all(), ndx.start, ndx.stop, ndx.step or 1))\n else:\n return islice(self._all(), ndx, ndx+1).next()", "def __getitem__( self, index ) :\n\n return( self.__entries[index] )", "def __getitem__(self, item):\n u, v = item\n return self.__getitem(u, v)", "def __getitem__(self, name_idx):\n if isinstance(name_idx, str):\n return self.atom_dict[name_idx]\n elif isinstance(name_idx, int):\n return self.atom_list[name_idx]\n raise TypeError, name_idx", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor:\n return dy.pick(self.expr_tensor, key, dim=len(self.expr_tensor.dim()[0])-1)\n else:\n return dy.pick(self.expr_transposed_tensor, key, dim=0)", "def __getitem__(self, key):\n if self.expr_list: return self.expr_list[key]\n else:\n if key < 0: key += len(self)\n if self.expr_tensor is not None:\n return torch.index_select(self.expr_tensor, dim=1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(1)\n else:\n return torch.index_select(self.expr_transposed_tensor, dim=-1, index=torch.LongTensor([key]).to(xnmt.device)).squeeze(-1)", "def __getitem__(self, index: int) -> T:\n node_at_index = self.__get_node_at_index(index)\n return node_at_index.item", "def __getitem__(self,cle):\n return self.F(*cle)", "def __getitem__(self, i):\n return self.__x[i]", "def __getitem__(self, chain_idx):\n if isinstance(chain_idx, 
str):\n return self.chain_dict[chain_idx]\n elif isinstance(chain_idx, int):\n return self.chain_list[chain_idx]\n raise TypeError, chain_idx", "def __getitem__(self, index):\n if index >= self.size:\n raise KeyError\n else:\n return self._get_item(index)", "def __getitem__(self, index):\n if isinstance(index, slice):\n return TokenList(self.token_list[index.start:index.stop:index.step])\n if index < 0: # Handle negative indices.\n index += len(self)\n return self.token_list[index]", "def __getitem__(self,key):\n return self.x[key]", "def __getitem__(self, idx):\n return self.transform(self.X[idx]), self.y[idx]", "def __getitem__(self, index):\n return self.cellData[index]", "def __getitem__(self, index: slice) -> List:\n\n return self.data[index]" ]
[ "0.7552691", "0.7426195", "0.7328249", "0.7320789", "0.7315738", "0.7315738", "0.72576785", "0.7215699", "0.7099744", "0.7099744", "0.7095745", "0.7095745", "0.7084862", "0.7081519", "0.70618874", "0.70596737", "0.7020587", "0.69942844", "0.69942844", "0.69866604", "0.69844437", "0.69640005", "0.6947249", "0.69235134", "0.6910069", "0.69098216", "0.68850446", "0.6884769", "0.6867099", "0.6867099", "0.6845035", "0.68447185", "0.68156254", "0.68016636", "0.6792196", "0.6792166", "0.6789614", "0.67799133", "0.6775123", "0.6759829", "0.6759829", "0.67291564", "0.6725096", "0.67000663", "0.669162", "0.66914177", "0.66910267", "0.6666951", "0.6661561", "0.6650056", "0.6648439", "0.6647629", "0.6647359", "0.6641459", "0.6625423", "0.6624069", "0.6618422", "0.6618422", "0.66166025", "0.66132045", "0.661169", "0.6611434", "0.66050625", "0.66045374", "0.65983844", "0.6587628", "0.65830505", "0.6577601", "0.6558328", "0.655336", "0.65446454", "0.6539581", "0.65375787", "0.6533555", "0.6526209", "0.6511357", "0.65106905", "0.64768577", "0.6476114", "0.6470259", "0.64677876", "0.6458859", "0.6435912", "0.6432668", "0.6421503", "0.64206946", "0.6411166", "0.640908", "0.63979626", "0.63790125", "0.6377011", "0.6365433", "0.63571984", "0.6346618", "0.634658", "0.6342253", "0.6339519", "0.6338", "0.6334104", "0.6334019", "0.6325205" ]
0.0
-1
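The record above ("Convert indexing to `getitem` call.") and the record that follows ("Convert indexed assignment to `setitem` call.") both lean on helpers such as `to_call` and `to_attribute` that are not shown in this dump. The sketch below re-implements the load-side idea in a self-contained way, assuming those helpers are thin `ast.Call` and `ast.Attribute` constructors; it handles only simple (non-slice) indices and assumes Python 3.9 or newer, where `node.slice` is already a plain expression.

```python
import ast
import operator

class GetitemRewriter(ast.NodeTransformer):
    """Rewrite loads such as `x[i]` into `operator.getitem(x, i)`."""

    def visit_Subscript(self, node):
        self.generic_visit(node)
        if isinstance(node.ctx, ast.Load):
            call = ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id="operator", ctx=ast.Load()),
                    attr="getitem",
                    ctx=ast.Load(),
                ),
                args=[node.value, node.slice],
                keywords=[],
            )
            return ast.copy_location(call, node)
        return node

tree = ast.parse("result = data[3]")
tree = ast.fix_missing_locations(GetitemRewriter().visit(tree))
ns = {"operator": operator, "data": list(range(10))}
exec(compile(tree, "<rewritten>", "exec"), ns)
assert ns["result"] == 3
```

The store side works the same way but must return an `ast.Expr` statement wrapping the `operator.setitem(...)` call, since an assignment statement is being replaced by an expression statement, as the record after this note does.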
Convert indexed assignment to `setitem` call.
def visit_Assign(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Subscript): fun = to_attribute(self.operator, 'setitem') args = [target.value, self.index_to_expr(target.slice), node.value] return ast.Expr(to_call(fun, args)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()", "def __setitem__(self, index: int, value: object) -> None:\n self.set_at_index(index, value)", "def __setitem__(key, value):", "def __setitem__(self, index, value):\n self._update_value_at(index, value)", "def setitem(obj, idx, value):\n obj.__setitem__(idx, value)", "def __setitem__(self, index, value):\n if isinstance(index, tuple):\n list.__getitem__(self, index[0])[index[1]] = value\n elif isinstance(index, int):\n self.pop(index)\n self.insert(index, value)\n else:\n raise TypeError, \"Table indices must be int or tuple\"", "def __setitem__(self, index, item):\n if isinstance(index, types.SliceType):\n # NOTE: item must be an iterable (list of tuples)\n self._main[index] = OrderedDict(item)\n else:\n # FIXME: Does this raise a sensible error?\n orig = self._main.keys[index]\n key, value = item\n if self._main.strict and key in self and (key != orig):\n raise ValueError('slice assignment must be from '\n 'unique keys')\n # delete the current one\n del self._main[self._main._sequence[index]]\n self._main.insert(index, key, value)", "def __setitem__(self, index, value):\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif (\n isinstance(index, tuple)\n and len(index) == 2\n and index[1] in self.data.columns\n ):\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value", "def __setitem__(self, key, value):\n # Both row index and columns given\n if isinstance(key, tuple):\n index, column = key\n index = self._slice_index(index) if isinstance(index, slice) else index\n return self.set(indexes=index, columns=column, values=value)\n # Row indexed with slice, all columns\n elif isinstance(key, slice):\n return self.set(indexes=self._slice_index(key), values=value)\n # Single row\n else:\n return self.set(indexes=key, values=value)", "def __setitem__(self, key, val):\n self()[key] = val", "def __setitem__(self, index, value):\n self.elem[index] = value", "def __setitem__(self, key, val):\n self[key][...] 
= val", "def special_setitem(self, form):\n obj = self.reallyCompile(form[1])\n key = self.reallyCompile(form[2])\n value = self.reallyCompile(form[3])\n return ast.Assign([ast.Subscript(obj,\n 'OP_ASSIGN',\n [key])],\n value)", "def __setitem__(self, idx, val):\n self.rows[idx[0]][idx[1]] = val", "def _setitem_impl(self, index, obj, value):\n obj.set_value(value)\n return obj", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, index, value):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._setitem2d(index, value)\n elif self.dim == 3:\n return self._setitem3d(index, value)", "def __setitem__(self, key, value):\n if isinstance(key, (list, tuple)):\n self.assign_block(key, value)\n else:\n self.assign_value(key, value)", "def __setitem__(self, inds, value):\n i, j = inds\n self.array[i][j] = value", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):", "def __setitem__(self, index, value):\n if isinstance(index, types.SliceType):\n keys = self._main._sequence[index]\n if len(keys) != len(value):\n raise ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(keys)))\n # FIXME: efficiency? Would be better to calculate the indexes\n # directly from the slice object\n # NOTE: the new keys can collide with existing keys (or even\n # contain duplicates) - these will overwrite\n for key, val in zip(keys, value):\n self._main[key] = val\n else:\n self._main[self._main._sequence[index]] = value", "def __setitem__(self, key, val):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__setitem__')(key, val)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n lval = _ensure_len(len(key), val)\n for i, k in enumerate(key):\n operator.__setitem__(self, k, lval[i])\n elif isinstance(key, slice):\n lval = val\n if not isinstance(val, collections.Iterable):\n slice_len = len([i for i in range(*key.indices(len(self)))])\n lval = _ensure_len(slice_len, val)\n list.__setitem__(self, key, lval)\n else:\n list.__setitem__(self, key, val)\n except Exception as first_exception:\n try:\n if isinstance(key, list):\n lval = _ensure_len(len(key), val)\n for i, k in enumerate(key):\n operator.__setitem__(self[i], k, lval[i])\n elif isinstance(key, tuple):\n lval = _ensure_len(len(self), val)\n try:\n for i, x in enumerate(self):\n operator.__setitem__(x, key, lval[i])\n except Exception:\n for i, x in enumerate(self):\n for j, k in enumerate(key):\n operator.__setitem__(x, k, lval[i][j])\n else:\n lval = _ensure_len(len(self), val)\n for i, x in enumerate(self):\n operator.__setitem__(x, key, lval[i])\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))\n\n # Allow chaining of set ops when using apply('__setitem__', k, v) and apply(operators.__setitem__, k, v)\n return self", "def 
_single_setitem(self, key, item):\n getattr(self._cpp_obj, self._setter)(key, item)", "def __setitem__(self, key, value):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)", "def __setitem__(self, k, v):\n\t\treturn setattr(self, k, v)", "def __setitem__(self, index, value):\n self.position[index] = value", "def __setitem__(self, index, value):\n self.buffer[index] = value", "def __setitem__(self, index, value):\n if not isinstance(index, tuple) or len(index) > 2:\n msg = \"data subscripting must be [rows,cols] or [rows,]\"\n raise ValueError(msg)\n sel_rows = self._check_index(self._nobs, index[0])\n sel_cols = (self._convert_col_index(index[1])\n if len(index) == 2 else None)\n sel_cols = self._check_index(self._nvar, sel_cols)\n \n nrows, ncols = len(sel_rows), len(sel_cols)\n \n value = self._standardize_input(value)\n \n # Reformation above is wrong for a single-row assignment, where\n # values [val1, val2, ...] should be interpreted as \n # single row: [[val1, val2, ...]]. Procedure above makes it \n # into [[val1], [val2], ...] (the correct assumption otherwise).\n if (nrows == 1 and ncols != 1 and \n len(value) == ncols and all(len(v) == 1 for v in value)):\n value = (tuple(v[0] for v in value),)\n else: # check that value dimensions match expected\n if not len(value) == nrows:\n raise ValueError(\"length of value does not match # of rows\")\n if not all(len(v) == ncols for v in value):\n raise ValueError(\"inner dimensions do not match # of columns\")\n \n # If no rows or no cols, nothing to do.\n # Could put this above the call to _standardize_input, \n # but then input of any shape allowed.\n if nrows == 0 or ncols == 0:\n return\n \n self._set_values(sel_rows, sel_cols, value)\n \n # Modify srtlist if necessary. If col_num is in srtlist, drop it\n # and any to the right. 
Ideally, would only make this change if \n # values were truly changed, by comparing new value with old.\n srtlist = self._srtlist\n nvar = self._nvar\n for col_num in sel_cols:\n if col_num in srtlist:\n srt_pos = srtlist.index(col_num)\n srtlist = srtlist[:srt_pos] + [None]*(nvar - srt_pos)\n self._srtlist = srtlist\n \n self._changed = True", "def __setitem__(self, ind: int, value: float) -> None:", "def __setitem__(name, other):", "def __setitem__(self, idx, value):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n if nidx >= len(self.data):\n raise IndexError\n self.data[nidx] = value", "def __setitem__(self, index, value):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # let my tile do the rest\n self.data[self.tile.offset(index)] = value\n # otherwise\n else:\n # set the item directly in my container\n self.data[index] = value\n # all done\n return", "def __setitem__(self, item_index: Index, new_item: Item) -> None:\n raise NotImplementedError(\"__setitem__\")", "def __setitem__(self, index: Any, value: Any) -> None:\n self.contents[index] = value\n return", "def __setitem__(self,i,v):\n _items[i] = v", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def __setitem__(self, i, value):\n self._ar[i] = value", "def __setitem__(self, index, value):\n if not isinstance(index, numbers.Integral):\n raise TypeError(\"Input index must be integer\")\n if index >= len(self._fsm.get(self._id)):\n raise ValueError(\"Input index is out of boundary\")\n ts = self._fsm.get(self._id)\n ts[index] = value\n self._fsm.store(self._id, ts)", "def __setitem__(self, key, val):\n self.set[key] = val", "def __setitem__(self, index, item):\n # type: (int, Any) -> None\n items = self._refs(item) if isinstance(index, slice) else self.ref(item)\n return list.__setitem__(self, index, items)", "def __setitem__(self, i, v):\n raise TypeError(\"'Factorization' object does not support item assignment\")", "def __setitem__(self, index, value):\n self._timeseriesData[index] = value", "def __setitem__(self, key, value):\n self.set(key, value)", "def __setitem__(self, key, value):\n self.set(key, value)", "def __setitem__(self, key, value):\n self.set(key, value)", "def __setitem__(self, key, val):\r\n if not isinstance(key, basestring):\r\n raise TypeError\r\n if key not in self._columns.keys():\r\n raise KeyError\r\n return setattr(self, key, val)", "def __setitem__(self, index, name):\n if isinstance(index, types.SliceType):\n # FIXME: efficiency?\n # check length is the same\n indexes = range(len(self._main._sequence))[index]\n if len(indexes) != len(name):\n raise ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(indexes)))\n # check they are the same keys\n # FIXME: Use set\n old_keys = self._main._sequence[index]\n new_keys = list(name)\n old_keys.sort()\n new_keys.sort()\n if old_keys != new_keys:\n raise KeyError('Keylist is not the same as current keylist.')\n orig_vals = [self._main[k] for k in name]\n del self._main[index]\n vals = zip(indexes, name, orig_vals)\n vals.sort()\n for i, k, v in vals:\n if self._main.strict and k in self._main:\n raise ValueError('slice assignment must be from '\n 'unique keys')\n self._main.insert(i, k, v)\n else:\n raise ValueError('Cannot assign to keys')", "def __setitem__(self, index, value):\n if self._list_like(index):\n len_var = len(index)\n if len_var==0:\n raise IndexError(\"Received empty index.\")\n elif len_var==1:\n 
self._points[index[0]] = value\n elif len_var==2:\n # safeguard against empty entries\n if index[0] not in self._points:\n self._points[index[0]] = StatePoint()\n self._points[index[0]][index[1]] = value\n else:\n raise IndexError(\"Received too long index.\")\n else:\n self._points[index] = value", "def __setitem__(self, key, value):\n self.__dict__[key] = value", "def __setitem__(self, index, newItem):\r\n self._items[index] = newItem", "def __setitem__(self, name, obj):", "def __setitem__(self, index_tuple, value):\n assert len(index_tuple) == 2, \"Invalid number of array subscripts.\"\n row, col = index_tuple\n assert 0 <= row < self.num_rows() and 0 <= col < self.num_cols(), \\\n \"Array subscript out of range.\"\n array_1d = self.rows[row]\n array_1d[col] = value", "def __setitem__(self, key, val):\n x, y = key\n self.matrix[y][x] = val", "def __setitem__(self, name, value):\r\n return self.set(name=value)", "def __setitem__(self, item, value):\n index = self.reindex(item)\n self.parent.__setitem__(index, value)", "def __setitem__(self, key, value):\r\n T=type(key)\r\n if T!=types.IntType and T!=types.LongType:\r\n raise TypeError, \"index must be integer\"\r\n\r\n if key==0: self.x = value\r\n elif key==1: self.y = value\r\n elif key==2: self.z = value\r\n elif key==3: self.w = value\r\n else:\r\n raise IndexError,\"index out of range\"", "def set(self, index, data):\n self.data[index] = data", "def __setitem__(self, key, val):\n dict.__setitem__(self, key, val)", "def __setitem__(self, arg, value):\n setattr(self.args, arg, value)", "def __setitem__(self, key, value):\r\n self.data[key] = value", "def __setitem__(self, name, value) -> None:\n self.__setattr__(name, value)", "def __setitem__(self, key, value):\n try:\n index = self.__keys.index(key)\n self.__vals[index] = value\n except ValueError:\n try:\n index = self.__vals.index(key)\n self.__keys[index] = value\n except ValueError:\n self.__keys.append(key)\n self.__vals.append(value)", "def __setitem__(self, attribute_name, value):\n pass # pragma: no cover", "def __setitem__(self, key, val):\n self.__check_key_validity(key)\n self.data[key[0]][key[1]] = val", "def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value", "def __setitem__(self, idx, element):\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n if not 0 <= idx < self._length: # Ignore indices outside of bounds\n raise IndexError(f'index {idx} out of bounds')\n self._arr[idx] = element", "def __setitem__(self, index, value):\n self.components[index] = value", "def __setitem__(self, index, value):\n if self.valid_index(index):\n if type(value) == str:\n self._data[index] = value\n else:\n raise TypeError\n else:\n raise IndexError", "def __setitem__(self, key, value):\n self.xg[key] = value", "def __setitem__(self, key, value):\n self.data[key] = value", "def __setitem__(self, key, value):\n self.data[key] = value", "def __setitem__(self, key, value):\n if key not in self.fields:\n raise KeyError(key)\n return setattr(self, key, value)", "def __setitem__(self, index, value):\n assert 0 <= 
index < len(self), \"Array subscript out of range\"\n self._elements[index] = value", "def __setitem__(self, key: tuple, value: float):\n s, a = key\n if not isinstance(s, self.observation_space) or not isinstance(a, self.action_space):\n raise KeyError\n self.store.setdefault(s, dict())[a] = value", "def __setitem__(self, key, value):\n self.set_attribute(key, value)", "def __setitem__(self, i, val):\n\t\tif i < self.n:\n\t\t\tself.v[i] = val", "def __setitem__(self, index, value):\n self.points[index] = value", "def __setitem__(self, index, value):\n self.points[index] = value", "def __setitem__(self, item, val):\r\n item.set_value(val, borrow=True)", "def __setitem__(self, key: T, value: T) -> None:\n self.update(key, value)", "def __setitem__(self, pos, val):\n self._coords[pos] = val", "def __setitem__(self, key, value):\n self.df[key] = value", "def __setitem__(self, key, value):\n if not self._is_valid(value):\n value = self._fix_value(value)\n self._inner.__setitem__(key, value)", "def __setitem__(self, key, value):\n if '.' in key:\n self.assign(key, value)\n else:\n setattr(self, key, value)", "def __setitem__(self, index: int, item: Any) -> None:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Set the first item\n elif index == 0:\n self._first = item\n # Recurse on the _rest\n else:\n if not self._rest:\n raise IndexError\n self._rest.__setitem__(index - 1, item)", "def __setitem__(self, item, value):\n self.vars[item] = value", "def __setitem__(self, key: tuple, value: float):\n s, a = key\n self.store.setdefault(s, dict())[a] = value", "def __setitem__(self, key, item):\n assert isinstance(key,list) and isinstance(item,list) and len(key)==2 and len(item)==2\n self._data[self.__ptBin(key[0])][self.__etaBin(key[1])] = item", "def setitem(self, axis, key, value):\n\n def setitem(df, axis, key, value):\n if is_scalar(key) and isinstance(value, pandas.DataFrame):\n value = value.squeeze()\n if not axis:\n df[key] = value\n else:\n df.loc[key] = value\n return df\n\n return DataFrameDefault.register(setitem)(self, axis=axis, key=key, value=value)", "def __setitem__(self, key, value):\n self.put(key, value)", "def __setitem__(self, key, value):\n self.put(key, value)", "def _bucket_setitem(self, j, k, v):\n pass", "def __setitem__(self):\n raise ValueError(\"Dataset objects are immutable\")", "def __setitem__(self, key, value):\n self._data[key] = value" ]
[ "0.7797051", "0.7401053", "0.73361707", "0.7178626", "0.7156668", "0.71429896", "0.7103829", "0.70937896", "0.6975121", "0.6972701", "0.69715893", "0.6964855", "0.69308025", "0.6930015", "0.69153154", "0.6901336", "0.6901336", "0.6901336", "0.6901336", "0.69008344", "0.6857378", "0.68511987", "0.68412405", "0.68412405", "0.68412405", "0.6837145", "0.68318826", "0.682454", "0.68172693", "0.6808618", "0.68011403", "0.67812335", "0.67797655", "0.676682", "0.67660207", "0.67603743", "0.6747301", "0.67298084", "0.67222893", "0.67143804", "0.6707841", "0.6651341", "0.66195875", "0.6619252", "0.6614778", "0.660038", "0.659298", "0.6592249", "0.6577398", "0.6577398", "0.6577398", "0.6563031", "0.65486777", "0.65360856", "0.6532824", "0.65206164", "0.650292", "0.6502673", "0.64995074", "0.64945954", "0.6489536", "0.64890456", "0.6473096", "0.64701605", "0.64645296", "0.6463294", "0.64322627", "0.64213955", "0.64201033", "0.6412822", "0.6412599", "0.640158", "0.6396836", "0.63961065", "0.6384936", "0.6379401", "0.6379401", "0.63691646", "0.6360289", "0.6358839", "0.63570833", "0.6346633", "0.63418764", "0.63418764", "0.633735", "0.6335071", "0.6331723", "0.63253474", "0.63189", "0.63156104", "0.62887937", "0.62860715", "0.6284449", "0.6283203", "0.6283028", "0.6270887", "0.6270887", "0.6258691", "0.6247575", "0.6231494" ]
0.6483923
62
Convert indexed `del` operation to `delitem` call.
def visit_Delete(self, node):
    self.generic_visit(node)
    target = get_single_target(node)
    if isinstance(target, ast.Subscript):
        fun = to_attribute(self.operator, 'delitem')
        args = [ target.value, self.index_to_expr(target.slice) ]
        return ast.Expr(to_call(fun, args))
    return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __delitem__(self, key):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__delitem__')(key)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n for k in sorted(key, reverse=True):\n operator.__delitem__(self, k)\n else:\n # Handles slices and ints. Other key types will fail.\n list.__delitem__(self, key)\n except Exception as first_exception:\n try:\n if isinstance(key, list):\n for i, k in enumerate(key):\n operator.__delitem__(self[i], k)\n elif isinstance(key, tuple):\n try:\n for x in self:\n operator.__delitem__(x, key)\n except Exception:\n for x in self:\n for k in key:\n operator.__delitem__(x, k)\n else:\n for x in self:\n operator.__delitem__(x, key)\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))\n\n # Allow chaining of set ops when using apply('__delitem__', k) and apply(operators.__delitem__, k)\n return self", "def __delitem__(self, index: int) -> None:\n error = self._coreIndex.removeDescriptor(index)\n assertError(error)", "def __delitem__(self, i):\n key = self._main._sequence[i]\n if isinstance(i, types.SliceType):\n for k in key:\n # FIXME: efficiency?\n del self._main[k]\n else:\n del self._main[key]", "def __delitem__(self, index: Any) -> None:\n del self.contents[index]\n return", "def __delitem__(self, idx):\n # note that this may result in an empty HSP object, which should be\n # invalid\n del self._items[idx]", "def __delitem__(self, idx):\n self.pop(idx)", "def __delitem__(self, index):\n # delete the column\n del self._data[index]\n\n # adjust the number of columns\n self._nrows -= 1", "def __delitem__(self, key, *args, **kwargs):\n self._del(key, *args, **kwargs)", "def __delitem__(self, index: int) -> None:\n del self._rows[index]", "def __delitem__(self, t: Tuple[int, ...]) -> None:\n ...", "def __delitem__(self, i: int) -> None:\n ...", "def __delitem__(self, index):\n # If input is a slice then delete all elements as determined\n # by the slice attributes, using an offset to account for the\n # changing size of the list.\n if isinstance(index, slice):\n offset = 0\n for i in xrange(*index.indices(len(self))):\n if i > -(len(self) + 1) or i < len(self):\n del self[i - offset]\n offset += 1\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n del cur_node.data_list[index]\n self.length -= 1\n\n self.__balance_node(prev_node, cur_node)", "def __delitem__(self, key: tuple):\n s, a = key\n del self.store[s][a]", "def delete_at_index(self, index: int) -> T:\n pass", "def delete_at_index(self, index: int) -> T:\n pass", "def __delitem__(self, key):\n pass", "def __delitem__(self, key):\n pass", "def _bucket_delitem(self, j, k):\n pass", "def _del(self, *args):\n return _ida_hexrays.ctree_items_t__del(self, *args)", "def _del(self, *args):\n return _ida_hexrays.qvector_carg_t__del(self, *args)", "def cfDel(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_DEL, *params)", "def __delitem__(self, key: tuple):\n s, a = key\n if not isinstance(s, self.observation_space) or not isinstance(a, self.action_space):\n raise KeyError\n del self.store[s][a]", "def delete_item(list_to_parse, item_index):\n del(list_to_parse[item_index]) # Remove the item\n return list_to_parse", "def __delitem__(self, index):\n del self.chromosome_list[index]", "def delete(self,\r\n 
index,\r\n notundoing=True,\r\n update_table=True):\r\n\r\n if self.read_only:\r\n display.noteprint((alerts.ATTENTION,'CANNOT EXECUTE: READ ONLY'))\r\n return {'keys': set(),\r\n 'text': '',\r\n 'meta': {}}\r\n self.indexchanged, self.indexchanged_key, self.indexchanged_tag = True, True, True\r\n self.indexchanges += 1\r\n\r\n\r\n if str(index) in self.indexes():\r\n self.display_buffer.append(index_reduce(str(index))+alerts.WAS_DELETED)\r\n self.delete_search_words(index,\r\n self.get_text_from_note(index))\r\n self.delete_keys_tags(index,\r\n self.get_keys_from_note(index))\r\n\r\n deletedmeta = self.get_metadata_from_note(index)\r\n deletedtext = self.get_text_from_note(index)\r\n deletedkeys = self.get_keys_from_note(index)\r\n\r\n if notundoing:\r\n self.done.add(('del',\r\n index,\r\n deletedkeys,\r\n deletedtext))\r\n\r\n self.delete_note(index)\r\n\r\n if update_table:\r\n self.default_dict['indextable'].delete(index)\r\n self.default_dict['indexlist'].delete(index)\r\n self.default_dict['indexlist_indexes'].delete(Index(index))\r\n self.changed = True\r\n if len(str(index)) == self.maxdepth_found:\r\n self.deepest(is_string=True,abridged=False)\r\n if len(index_reduce(str(index))) == self.abr_maxdepth_found:\r\n self.deepest(is_string=True,abridged=True)\r\n if self.project:\r\n for p_temp in self.project:\r\n self.default_dict['projects'].delete_index(index,\r\n project=p_temp)\r\n\r\n return {'keys': deletedkeys,\r\n 'text': deletedtext,\r\n 'meta': deletedmeta}", "def _del(self, *args):\n return _ida_hexrays.qvector_ccase_t__del(self, *args)", "def create_delete_item(doc, source_index):\n\n action = { 'delete' : { '_index' : source_index, '_type' : doc['_type'], '_id' : doc['_id'] } }\n return action", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def __delitem__(name):", "def __delitem__(self, key):\n self.f_remove(key)", "def __delitem__(self, key):\n if isinstance(key, types.SliceType):\n # FIXME: efficiency?\n keys = self._sequence[key]\n for entry in keys:\n dict.__delitem__(self, entry)\n del self._sequence[key]\n else:\n # do the dict.__delitem__ *first* as it raises\n # the more appropriate error\n dict.__delitem__(self, key)\n self._sequence.remove(key)", "def __delitem__(self, key: T) -> None:\n self.delete(key)", "def __delitem__(self, key):\n\t\tdel self.__dStore[key]", "def remove_item(self, idx_of_item):\n del self.items[idx_of_item]", "def __delitem__(self, key):\n del self.list[key]", "def __delitem__(self,key):\n self.table.delItem(key,self.column)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.delete(key)", "def delete_index(\n self,\n ) -> Callable[\n [datastore_admin.DeleteIndexRequest], Awaitable[operations_pb2.Operation]\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_index\" not in self._stubs:\n self._stubs[\"delete_index\"] = self.grpc_channel.unary_unary(\n \"/google.datastore.admin.v1.DatastoreAdmin/DeleteIndex\",\n request_serializer=datastore_admin.DeleteIndexRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"delete_index\"]", "def __delitem__(self, idx):\n row, col = idx\n\n array_row = self._find_row_before(row)\n\n if (array_row.next_row == None or array_row.next_row.row_number > row):\n return\n\n target_row = array_row.next_row\n array_entry 
= self._find_column_before(target_row, col)\n\n if (array_entry.next_entry == None or array_entry.next_entry.column_number > col):\n return\n\n array_entry.next_entry = array_entry.next_entry.next_entry\n\n # If this row still has entries in it we are finished\n if target_row.row_sentinel.next_entry != None:\n return\n\n array_row.next_row = array_row.next_row.next_row", "def __delitem__(self, i):\n\n if self.mode == DB_OPEN_READ:\n raise RecordTableAccessError()\n\n # Check that the value was set (otherwise raise KeyError):\n self[i]\n self._set_packed_record(i, self.packer.empty_value)", "def delete_at_index(self, idx):\n self.timeseries = np.delete(self.timeseries, idx, axis=1)\n del self.ch_name[idx]\n del self.units[idx]", "def _del(self, *args):\n return _ida_hexrays.cinsnptrvec_t__del(self, *args)", "def delete(data, index):\n return data[:index] + data[index + 1:]", "def delete_element(some_list, index):\n del some_list[index]\n return some_list", "def delete(self, index):\n del self.data[index]", "def remove_index_from_key(self,key,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n self.key_dict[key].remove(str(index))\r\n\r\n #with database\r\n if self.using_database:\r\n value_tuple = (notebookname,key,str(index))\r\n db_cursor.execute(\"DELETE FROM \"\r\n +\"keys_to_indexes \"\r\n +\"WHERE notebook=? \"\r\n +\"AND keyword=? \"\r\n +\"AND note_index=?;\",\r\n value_tuple)\r\n\r\n db_cursor.execute(\"SELECT * FROM \"\r\n +\"keys_to_indexes \"\r\n +\"WHERE notebook=? and keyword=?;\",\r\n value_tuple[0:2])\r\n if db_cursor.fetchone():\r\n db_cursor.execute(\"DELETE FROM \"\r\n +\"all_keys WHERE notebook=? \"\r\n +\"AND keyword=?;\",\r\n value_tuple[0:2])", "def _delete_index( env, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n adapter.delete( queries=[\"*:*\"] )\n adapter.commit()\n logger.info(u\"Deleted index\")", "def _del(self, *args):\n return _ida_frame.xreflist_t__del(self, *args)", "def __delitem__(self,key):\n if key in self.changed: self.changed.remove(key)\n if key not in self.deleted: self.deleted.append(key)\n del self.data[key]", "def delete(self, keyword, key):", "def __delitem__(self, key):\n del self._data[key]", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['1.SKM7.640188']", "def delete(get_index, document_id): \n client, index_name = connection_es()\n resp = client.delete(index = get_index, doc_type=\"nvisnx\", id = document_id)\n return resp", "def test_index_delete(self):\n a = self.test_index()\n a.delete()\n es = self.es\n es.refresh()\n r = es.search(query=StringQuery('zool'))\n eq_(r['hits']['total'], 0, \"We shouldn't get any hits.\")", "def removeItem(*args):", "def removeItem(*args):", "def __delitem__(self, nodename):\n\n for hash_ in self._repl_iterator(nodename):\n # will raise KeyError for nonexistent node name\n del self._nodes[hash_]\n index = bisect.bisect_left(self._keys, hash_)\n del self._keys[index]", "def __delitem__(self, nodename):\n\n for hash_ in self._repl_iterator(nodename):\n # will raise KeyError for nonexistent node name\n del self._nodes[hash_]\n index = bisect.bisect_left(self._keys, hash_)\n del self._keys[index]", "def del_row(self, row_index):\n ...", "def delete(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n while t in self.data:\r\n self.data.remove(t)\r\n changed 
= True\r\n \r\n if changed:\r\n query_cache.set(self.iden, self.data)", "def delete(self, key):", "def __delitem__(self, i):\n # An element of a policy function can't be deleted", "def _del_item(dic: dict, keys: list):\n\tdic = _get_item(dic, keys[:-1])\n\tdel dic[keys[-1]]", "def _del(self, *args):\n return _ida_hexrays.qvector_history_t__del(self, *args)", "def __delitem__(self, key: int | slice) -> None:\n if isinstance(key, slice):\n for item in self._list[key]:\n ref = getattr(item, \"_labeled_widget_ref\", None)\n if ref:\n item = ref()\n self._widget._mgui_remove_widget(item)\n elif isinstance(key, int):\n item = self._list[key]\n ref = getattr(item, \"_labeled_widget_ref\", None)\n if ref:\n item = item._labeled_widget_ref() # type: ignore\n self._widget._mgui_remove_widget(item)\n else:\n raise TypeError(f\"list indices must be integers or slices, not {type(key)}\")\n del self._list[key]", "def __delitem__(self, key):\n del self._dict[key]\n del self._type_converter[key]", "def _Dynamic_DeleteIndex(self, index, void, request_id=None):\n self._RemoteSend(index, void, \"DeleteIndex\", request_id)\n return void", "def __delitem__(self, key):\n del self.elements[key]", "def __delitem__(self, where):\n with self._lock:\n self._current_bytes -= self._data[where]\n del self._data[where]\n self._order.remove(where)", "def deindex(self):\n self.deindex_value(self.proxy_get())", "def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()", "def _numpy_delete(x, idx):\n # NB: numpy.delete is not yet available in JAX\n mask = jnp.arange(x.shape[0] - 1) < idx\n return jnp.where(mask.reshape((-1,) + (1,) * (x.ndim - 1)), x[:-1], x[1:])", "def delete_index_data(dir,data):\n db = IndexDb(dir)\n result = db.delete_from_index(data)\n return result", "def _delete_command_idxs(indexes, from_cmake):\n for index in sorted(indexes, reverse=True):\n del from_cmake[index]", "def delete(self, **kwargs):\n\n rst = self.del_sngl_pair(kwargs)\n return rst", "def _delete_object_inverted_index_terms(self, (object_type, object_id), ivtidx):\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def __delitem__(self, tid: int):\n del self._cache[tid]", "def __delitem__(self, k):\n j = self._hash_function(k)\n self._bucket_delitem(j, k)\n self._n -= 1", "def _map___delitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n self.erase(self.find(key))\n return", "def DeleteIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_DeleteIndex(self, arg0)", "def document_delete(index_name, doc_type, doc_id):\n resp = es.delete(index=index_name, doc_type=doc_type, id=doc_id)\n print(resp)", "def _delete_object_inverted_index_terms(self, obj, ivtidx):\n object_type, object_id = obj\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def DeleteIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_DeleteIndex(self, arg0)", "def __delitem__(self, key):\n if not isinstance(key, str) or '.' 
not in key:\n dict.__delitem__(self, key)\n return\n obj, token = _descend(self, key)\n del obj[token]", "def __delitem__(self, key):\n self.deleteCurve(key)", "def delete(self,key):\n\n pass", "def __delitem__(self, key):\n del self._ctx[key]", "def __delitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n q = q.filter(PAW2_DBObject.key == key)\n assert q.delete(synchronize_session=False) == 1\n session.commit()", "def __delitem__(self, key: Union[Any, int]) -> None:\n if isinstance(key, int):\n del self.contents[key]\n else:\n self.contents = [c for c in self.contents \n if denovo.unit.get_name(c) != key]\n return", "def _bucket_delitem(self, j, k):\n bucket = self._table[j]\n if bucket is None: # no match found\n raise KeyError(\"Key Error: \" + repr(k))\n del bucket[k]", "def __delitem__(self, index):\n def _removeBlock(blockIndex):\n block = self._doc.findBlockByNumber(blockIndex)\n if block.next().isValid(): # not the last\n cursor = QTextCursor(block)\n cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)\n elif block.previous().isValid(): # the last, not the first\n cursor = QTextCursor(block.previous())\n cursor.movePosition(QTextCursor.EndOfBlock)\n cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)\n cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)\n else: # only one block\n cursor = QTextCursor(block)\n cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)\n cursor.removeSelectedText()\n\n if isinstance(index, int):\n index = self._checkAndConvertIndex(index)\n _removeBlock(index)\n elif isinstance(index, slice):\n \"\"\"List of indexes is reversed for make sure \n not processed indexes are not shifted during document modification\n \"\"\"\n start, stop, step = index.indices(self._doc.blockCount())\n if step > 0:\n start, stop, step = stop - 1, start - 1, step * -1\n\n for blockIndex in range(start, stop, step):\n _removeBlock(blockIndex)", "def discard_index_from_key(self,key,index):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n if key in self.key_dict:\r\n\r\n self.key_dict[key].discard(str(index))\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n value_tuple = (notebookname,key,str(index),)\r\n db_cursor.execute(\"DELETE FROM\"\r\n +\" keys_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND keyword=?\"\r\n +\" AND note_index=?;\",\r\n value_tuple)\r\n\r\n db_cursor.execute(\"SELECT * FROM\"\r\n +\" keys_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" and keyword=?;\",\r\n value_tuple[0:2])\r\n if db_cursor.fetchone():\r\n db_cursor.execute(\"DELETE FROM\"\r\n +\" all_keys WHERE notebook=?\"\r\n +\" AND keyword=?;\",\r\n value_tuple[0:2])", "def delete_item ( self ):\n list, index = self.get_info()\n self.value = list[:index] + list[index+1:]", "def _del(self, *args):\n return _ida_hexrays.qvector_lvar_t__del(self, *args)", "def __delitem__(self, key):\n\n del self._vertices[key]", "def __delitem__(self, key: Hashable) -> None:\n del self.contents[key]\n return", "def delete_index_field(DomainName=None, IndexFieldName=None):\n pass", "def test_delete_item_using_delete(self):\n pass", "def visit_Delete(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr) ]\n return ast.Expr(to_call(to_name('delattr'), args))\n return node" ]
[ "0.66921794", "0.6634915", "0.6633796", "0.6570889", "0.6552683", "0.65220124", "0.6386673", "0.637424", "0.633695", "0.6330479", "0.63189936", "0.629664", "0.62494636", "0.6222551", "0.6222551", "0.6154767", "0.6154767", "0.6136742", "0.6106075", "0.6077464", "0.60384274", "0.6037455", "0.60155076", "0.598926", "0.5984369", "0.592938", "0.59137845", "0.59051883", "0.59021115", "0.5886797", "0.587863", "0.5864375", "0.58249867", "0.5802411", "0.5791504", "0.57902604", "0.5766326", "0.5766326", "0.5741133", "0.5740761", "0.5739162", "0.57376665", "0.5733008", "0.57301325", "0.5727387", "0.57228327", "0.56996167", "0.56862223", "0.5684221", "0.566815", "0.5654707", "0.5643371", "0.5643325", "0.5641376", "0.56350946", "0.5621569", "0.5621569", "0.56161344", "0.56161344", "0.5615948", "0.56156", "0.5608215", "0.56081927", "0.5602161", "0.5599343", "0.5563541", "0.5554919", "0.55503654", "0.5535351", "0.55339366", "0.5519959", "0.55085826", "0.55053747", "0.5498008", "0.54967976", "0.54965293", "0.5495589", "0.54899126", "0.5488923", "0.54803365", "0.5453061", "0.54471713", "0.54453856", "0.5442672", "0.542816", "0.5420549", "0.541784", "0.54170454", "0.5407839", "0.5407198", "0.5406591", "0.5396052", "0.53914523", "0.53903514", "0.5387711", "0.53859156", "0.53732055", "0.53724223", "0.53717333", "0.53503805" ]
0.67726755
0
Convert indexed augmented assignment to `getitem`/`setitem` calls.
def visit_AugAssign(self, node):
    self.generic_visit(node)
    stmts = []
    target = node.target
    if not isinstance(target, ast.Subscript):
        return node

    # AST node for target value, gensym-ed if necessary.
    if self.can_reevaluate(target.value):
        target_node = target.value
    else:
        target_node = to_name(gensym())
        stmts.append(ast.Assign(
            [set_ctx(target_node, ast.Store())], target.value))

    # AST node for index, gensym-ed if necessary.
    index_expr = self.index_to_expr(target.slice)
    if self.can_reevaluate(index_expr):
        index_node = index_expr
    else:
        index_node = to_name(gensym())
        stmts.append(ast.Assign(
            [set_ctx(index_node, ast.Store())], index_expr))

    # Main AST node for the indexed augmented assignment.
    stmts.append(ast.Expr(
        to_call(to_attribute(self.operator, 'setitem'), [
            target_node,
            index_node,
            to_call(self.op_to_function(node.op), [
                to_call(to_attribute(self.operator, 'getitem'), [
                    target_node,
                    index_node,
                ]),
                node.value
            ])
        ])
    ))
    return stmts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def special_setitem(self, form):\n obj = self.reallyCompile(form[1])\n key = self.reallyCompile(form[2])\n value = self.reallyCompile(form[3])\n return ast.Assign([ast.Subscript(obj,\n 'OP_ASSIGN',\n [key])],\n value)", "def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def __setitem__(key, value):", "def reconstruct_input(self, ix):", "def __setitem__(name, other):", "def __setitem__(self, index, item):\n if isinstance(index, types.SliceType):\n # NOTE: item must be an iterable (list of tuples)\n self._main[index] = OrderedDict(item)\n else:\n # FIXME: Does this raise a sensible error?\n orig = self._main.keys[index]\n key, value = item\n if self._main.strict and key in self and (key != orig):\n raise ValueError('slice assignment must be from '\n 'unique keys')\n # delete the current one\n del self._main[self._main._sequence[index]]\n self._main.insert(index, key, value)", "def __setitem__(self, i, v):\n raise TypeError(\"'Factorization' object does not support item assignment\")", "def __setitem__(self, index, value):\n if not isinstance(index, tuple) or len(index) > 2:\n msg = \"data subscripting must be [rows,cols] or [rows,]\"\n raise ValueError(msg)\n sel_rows = self._check_index(self._nobs, index[0])\n sel_cols = (self._convert_col_index(index[1])\n if len(index) == 2 else None)\n sel_cols = self._check_index(self._nvar, sel_cols)\n \n nrows, ncols = len(sel_rows), len(sel_cols)\n \n value = self._standardize_input(value)\n \n # Reformation above is wrong for a single-row assignment, where\n # values [val1, val2, ...] should be interpreted as \n # single row: [[val1, val2, ...]]. Procedure above makes it \n # into [[val1], [val2], ...] (the correct assumption otherwise).\n if (nrows == 1 and ncols != 1 and \n len(value) == ncols and all(len(v) == 1 for v in value)):\n value = (tuple(v[0] for v in value),)\n else: # check that value dimensions match expected\n if not len(value) == nrows:\n raise ValueError(\"length of value does not match # of rows\")\n if not all(len(v) == ncols for v in value):\n raise ValueError(\"inner dimensions do not match # of columns\")\n \n # If no rows or no cols, nothing to do.\n # Could put this above the call to _standardize_input, \n # but then input of any shape allowed.\n if nrows == 0 or ncols == 0:\n return\n \n self._set_values(sel_rows, sel_cols, value)\n \n # Modify srtlist if necessary. If col_num is in srtlist, drop it\n # and any to the right. 
Ideally, would only make this change if \n # values were truly changed, by comparing new value with old.\n srtlist = self._srtlist\n nvar = self._nvar\n for col_num in sel_cols:\n if col_num in srtlist:\n srt_pos = srtlist.index(col_num)\n srtlist = srtlist[:srt_pos] + [None]*(nvar - srt_pos)\n self._srtlist = srtlist\n \n self._changed = True", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def assignment(x, values, indices, axis=0):\n x_new = copy(x)\n\n use_vectorization = hasattr(indices, \"__len__\") and len(indices) < ndim(x)\n if _is_boolean(indices):\n x_new[indices] = values\n return x_new\n zip_indices = _is_iterable(indices) and _is_iterable(indices[0])\n len_indices = len(indices) if _is_iterable(indices) else 1\n if zip_indices:\n indices = tuple(zip(*indices))\n if not use_vectorization:\n if not zip_indices:\n len_indices = len(indices) if _is_iterable(indices) else 1\n len_values = len(values) if _is_iterable(values) else 1\n if len_values > 1 and len_values != len_indices:\n raise ValueError(\"Either one value or as many values as indices\")\n x_new[indices] = values\n else:\n indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:]))\n x_new[indices] = values\n return x_new", "def __setitem__(self, key, val):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__setitem__')(key, val)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n lval = _ensure_len(len(key), val)\n for i, k in enumerate(key):\n operator.__setitem__(self, k, lval[i])\n elif isinstance(key, slice):\n lval = val\n if not isinstance(val, collections.Iterable):\n slice_len = len([i for i in range(*key.indices(len(self)))])\n lval = _ensure_len(slice_len, val)\n list.__setitem__(self, key, lval)\n else:\n list.__setitem__(self, key, val)\n except Exception as first_exception:\n try:\n if isinstance(key, list):\n lval = _ensure_len(len(key), val)\n for i, k in enumerate(key):\n operator.__setitem__(self[i], k, lval[i])\n elif isinstance(key, tuple):\n lval = _ensure_len(len(self), val)\n try:\n for i, x in enumerate(self):\n operator.__setitem__(x, key, lval[i])\n except Exception:\n for i, x in enumerate(self):\n for j, k in enumerate(key):\n operator.__setitem__(x, k, lval[i][j])\n else:\n lval = _ensure_len(len(self), val)\n for i, x in enumerate(self):\n operator.__setitem__(x, key, lval[i])\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))\n\n # Allow chaining of set ops when using apply('__setitem__', k, v) and apply(operators.__setitem__, k, v)\n return self", "def __setitem__(self, ind: int, value: float) -> None:", "def __setitem__(self, key, value):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)", "def __getitem__(self, index):\n return index, super().__getitem__(index)", "def __setitem__(self, index, value):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = 
tuple(index)\n\n if self.dim == 2:\n return self._setitem2d(index, value)\n elif self.dim == 3:\n return self._setitem3d(index, value)", "def __setitem__(self, k, v):\n\t\treturn setattr(self, k, v)", "def __setitem__(self, index, value):\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif (\n isinstance(index, tuple)\n and len(index) == 2\n and index[1] in self.data.columns\n ):\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value", "def __getitem__(self, idx):\n src_idx, tgt_idx = self.full_idxs[idx]\n\n X = {'src': self.X['src'][src_idx], 'tgt': self.X['tgt'][tgt_idx]}\n for key, value in X.items():\n X[key] = self.transform(X[key])\n\n y = {'src': self.y['src'][src_idx], 'tgt': self.y['tgt'][tgt_idx]}\n\n return X, y", "def __setitem__(self, inds, value):\n i, j = inds\n self.array[i][j] = value", "def __getitem__(self, index: Any) -> ColumnOperators:\n return self.operate(getitem, index)", "def setitem(obj, idx, value):\n obj.__setitem__(idx, value)", "def replace_at_index(tup, ix, val):\n\n return tup[:ix] + (val,) + tup[ix + 1:]", "def visit_AugAssign(self, node):\n # FIXME: Gensym the LHS to avoid two evaluations.\n self.generic_visit(node)\n rhs = to_call(self.op_to_function(node.op),\n [set_ctx(node.target), node.value])\n return ast.Assign([node.target], rhs)", "def __setitem__(self, key, value):", "def __setitem__(self,i,v):\n _items[i] = v", "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def __setitem__(self, key, val):\n if isinstance(key, types.SliceType):\n if not isinstance(val, OrderedDict):\n # FIXME: allow a list of tuples?\n raise TypeError('slice assignment requires an OrderedDict')\n keys = self._sequence[key]\n # NOTE: Could use ``range(*key.indices(len(self._sequence)))``\n indexes = range(len(self._sequence))[key]\n if key.step is None:\n # NOTE: new slice may not be the same size as the one being\n # overwritten !\n # NOTE: What is the algorithm for an impossible slice?\n # e.g. 
d[5:3]\n pos = key.start or 0\n del self[key]\n newkeys = val.keys()\n for k in newkeys:\n if k in self:\n if self.strict:\n raise ValueError('slice assignment must be from '\n 'unique keys')\n else:\n # NOTE: This removes duplicate keys *first*\n # so start position might have changed?\n del self[k]\n self._sequence = (self._sequence[:pos] + newkeys +\n self._sequence[pos:])\n dict.update(self, val)\n else:\n # extended slice - length of new slice must be the same\n # as the one being replaced\n if len(keys) != len(val):\n raise ValueError('attempt to assign sequence of size %s '\n 'to extended slice of size %s' % (len(val), len(keys)))\n # FIXME: efficiency?\n del self[key]\n item_list = zip(indexes, val.items())\n # smallest indexes first - higher indexes not guaranteed to\n # exist\n item_list.sort()\n for pos, (newkey, newval) in item_list:\n if self.strict and newkey in self:\n raise ValueError('slice assignment must be from unique'\n ' keys')\n self.insert(pos, newkey, newval)\n else:\n if key not in self:\n self._sequence.append(key)\n dict.__setitem__(self, key, val)", "def __getitem__(self, index):\n # type: (int) -> Any\n items = list.__getitem__(self, index)\n return type(self)(self._values(items)) if isinstance(index, slice) else self.value(items)", "def __setitem__(self, index, value):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # let my tile do the rest\n self.data[self.tile.offset(index)] = value\n # otherwise\n else:\n # set the item directly in my container\n self.data[index] = value\n # all done\n return", "def __getitem__(self, idx):\n results = copy.deepcopy(self.db[idx])\n results['ann_info'] = self.ann_info\n return self.pipeline(results)", "def __setitem__(self, key, value):\n # Both row index and columns given\n if isinstance(key, tuple):\n index, column = key\n index = self._slice_index(index) if isinstance(index, slice) else index\n return self.set(indexes=index, columns=column, values=value)\n # Row indexed with slice, all columns\n elif isinstance(key, slice):\n return self.set(indexes=self._slice_index(key), values=value)\n # Single row\n else:\n return self.set(indexes=key, values=value)", "def __setitem__(self, key, val):\n self[key][...] 
= val", "def __setitem__(self, index, name):\n if isinstance(index, types.SliceType):\n # FIXME: efficiency?\n # check length is the same\n indexes = range(len(self._main._sequence))[index]\n if len(indexes) != len(name):\n raise ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(indexes)))\n # check they are the same keys\n # FIXME: Use set\n old_keys = self._main._sequence[index]\n new_keys = list(name)\n old_keys.sort()\n new_keys.sort()\n if old_keys != new_keys:\n raise KeyError('Keylist is not the same as current keylist.')\n orig_vals = [self._main[k] for k in name]\n del self._main[index]\n vals = zip(indexes, name, orig_vals)\n vals.sort()\n for i, k, v in vals:\n if self._main.strict and k in self._main:\n raise ValueError('slice assignment must be from '\n 'unique keys')\n self._main.insert(i, k, v)\n else:\n raise ValueError('Cannot assign to keys')", "def __getitem__(self, idx):\n return self.getitem(idx)", "def __setitem__(self, index, item):\n # type: (int, Any) -> None\n items = self._refs(item) if isinstance(index, slice) else self.ref(item)\n return list.__setitem__(self, index, items)", "def __setitem__(self, index, value):\n if isinstance(index, tuple):\n list.__getitem__(self, index[0])[index[1]] = value\n elif isinstance(index, int):\n self.pop(index)\n self.insert(index, value)\n else:\n raise TypeError, \"Table indices must be int or tuple\"", "def __setitem__(self, name, obj):", "def __setitem__(self, index, value):\n if isinstance(index, types.SliceType):\n keys = self._main._sequence[index]\n if len(keys) != len(value):\n raise ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(keys)))\n # FIXME: efficiency? Would be better to calculate the indexes\n # directly from the slice object\n # NOTE: the new keys can collide with existing keys (or even\n # contain duplicates) - these will overwrite\n for key, val in zip(keys, value):\n self._main[key] = val\n else:\n self._main[self._main._sequence[index]] = value", "def __getitem__(sliceOrIdentifier):", "def __setitem__(self, index: int, value: object) -> None:\n self.set_at_index(index, value)", "def __getitem__(self, idx):\n pass", "def __getitem__(self, idx):\n pass", "def __getitem__(self, index):\n raise NotImplementedError", "def __setitem__(self, key, val):\n self()[key] = val", "def _setitem_static(x, indices, values):\n from .framework import default_main_program, Variable\n\n if x.type == paddle.fluid.core.VarDesc.VarType.LOD_TENSOR_ARRAY:\n return _setitem_for_tensor_array(x, indices, values)\n\n # step1: parsing the index and recording them\n (\n starts,\n ends,\n steps,\n axes,\n none_axes,\n decrease_axes,\n advanced_index,\n has_advanced_index,\n use_strided_slice,\n ) = parse_index(x, indices)\n\n inputs = {'Input': x}\n attrs = {\n 'axes': axes,\n 'starts': starts,\n 'ends': ends,\n 'steps': steps,\n 'decrease_axes': decrease_axes,\n 'none_axes': none_axes,\n }\n if paddle.utils._contain_var(starts):\n inputs['StartsTensorList'] = paddle.utils._convert_to_tensor_list(\n starts\n )\n del attrs['starts']\n if paddle.utils._contain_var(ends):\n inputs['EndsTensorList'] = paddle.utils._convert_to_tensor_list(ends)\n del attrs['ends']\n if paddle.utils._contain_var(steps):\n inputs['StepsTensorList'] = paddle.utils._convert_to_tensor_list(steps)\n del attrs['steps']\n\n if not has_advanced_index:\n # step2. 
Parse values\n dtype = x.dtype\n attrs['dtype'] = dtype\n\n from .data_feeder import convert_dtype\n\n if isinstance(values, (bool, int, float, complex)):\n values = np.array([values]).astype(convert_dtype(dtype))\n\n if isinstance(values, np.ndarray):\n shape = list(values.shape)\n values = values.ravel().tolist()\n attrs[\"values\"] = values\n attrs[\"shape\"] = shape\n\n elif isinstance(values, Variable):\n inputs[\"ValueTensor\"] = values\n else:\n raise TypeError(\n \"Only support to assign an integer, float, numpy.ndarray or \"\n \"paddle.Tensor to a paddle.Tensor, but received {}\".format(\n type(values)\n )\n )\n\n # step3.1: Only basic indexing, use OP set_value to set value.\n if paddle.in_dynamic_mode():\n x._bump_inplace_version()\n output = x\n else:\n helper = paddle.fluid.layer_helper.LayerHelper(\n 'set_value', **locals()\n )\n if helper.main_program.current_block_idx != 0:\n # not in global block, we should create a global variable.\n output = helper._create_global_variable_for_type_inference(\n dtype=x.dtype\n )\n else:\n output = helper.create_variable_for_type_inference(\n dtype=x.dtype\n )\n cur_block = default_main_program().current_block()\n cur_block.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': output},\n attrs=attrs,\n inplace_map={\"Input\": \"Out\"},\n )\n\n if not paddle.in_dynamic_mode():\n # map var to the new output\n paddle.jit.api.ProgramTranslator.get_instance()._params_map.add(\n cur_block.program, x.desc.id(), output\n )\n return output\n else:\n # step3.2: Case for there are advanced indexing.\n # 1. get __getitem__ result of basic indexing;\n # 2. transpose original tensor so that the axis with advanced indexing will come to the first;\n # 3. assign values to the sliced result by index_put OP;\n # 4. transpose back and assign the result to original tensor by set_value OP.\n\n sub_tensor = get_tensor_with_basic_indexing(\n x,\n axes,\n starts,\n ends,\n steps,\n decrease_axes,\n none_axes,\n use_strided_slice,\n )\n (\n transed_sub_tensor,\n adjusted_advanced_index,\n transback_dim,\n _,\n _,\n ) = deal_advanced_index(sub_tensor, advanced_index, True)\n if not isinstance(values, Variable):\n values = paddle.assign(values).astype(transed_sub_tensor.dtype)\n transed_sub_tensor = transed_sub_tensor.index_put(\n adjusted_advanced_index, values\n )\n\n # NOTE(zoooo0820): now basic indexing of __getitem__ will return a new Tensor both in dynamic and static mode\n # After strided is ready and basic indexing returns view of Tensor in dynamic mode. 
The code shoule be changed\n # for dynamic mode.\n if paddle.in_dynamic_mode():\n transed_sub_tensor.index_put_(adjusted_advanced_index, values)\n else:\n transed_sub_tensor = transed_sub_tensor.index_put(\n adjusted_advanced_index, values\n )\n\n transback_sub_tensor = transed_sub_tensor.transpose(transback_dim)\n\n inputs[\"ValueTensor\"] = transback_sub_tensor\n if paddle.in_dynamic_mode():\n x._bump_inplace_version()\n output = x\n else:\n helper = paddle.fluid.layer_helper.LayerHelper(\n 'set_value', **locals()\n )\n if helper.main_program.current_block_idx != 0:\n # not in global block, we should create a global variable.\n output = helper._create_global_variable_for_type_inference(\n dtype=x.dtype\n )\n else:\n output = helper.create_variable_for_type_inference(\n dtype=x.dtype\n )\n cur_block = default_main_program().current_block()\n cur_block.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': output},\n attrs=attrs,\n inplace_map={\"Input\": \"Out\"},\n )\n if not paddle.in_dynamic_mode():\n # map var to the new output\n paddle.jit.api.ProgramTranslator.get_instance()._params_map.add(\n cur_block.program, x.desc.id(), output\n )\n return output", "def __setitem__(self, index, value):\n self.buffer[index] = value", "def __getitem__ (self, index):\n pass", "def __setitem__(self, i, value):\n self._ar[i] = value", "def __setitem__(self, attribute_name, value):\n pass # pragma: no cover", "def __setitem__(self, key, value):\n # type: (Union[int, np.ndarray], Any) -> None\n # Convert all possible input key types to an array of integers\n if is_bool_dtype(key):\n key = np.argwhere(key).flatten()\n elif isinstance(key, slice):\n key = np.array(range(len(self))[key])\n elif is_integer(key):\n key = np.array([key])\n else:\n key = np.asanyarray(key)\n\n if pd.api.types.is_scalar(value):\n value = np.broadcast_to(value, len(key))\n else:\n value = np.asarray(value)\n\n if len(key) != len(value):\n raise ValueError(\"Length mismatch between index and value.\")\n\n affected_chunks_index = self._get_chunk_indexer(key)\n affected_chunks_unique = np.unique(affected_chunks_index)\n\n all_chunks = list(self.data.iterchunks())\n\n for ix, offset in zip(\n affected_chunks_unique, self.offsets[affected_chunks_unique]\n ):\n chunk = all_chunks[ix]\n\n # Translate the array-wide indices to indices of the chunk\n key_chunk_indices = np.argwhere(affected_chunks_index == ix).flatten()\n array_chunk_indices = key[key_chunk_indices] - offset\n\n arr = chunk.to_pandas().values\n # In the case where we zero-copy Arrow to Pandas conversion, the\n # the resulting arrays are read-only.\n if not arr.flags.writeable:\n arr = arr.copy()\n arr[array_chunk_indices] = value[key_chunk_indices]\n\n mask = None\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if (\n pa.types.is_integer(self.dtype.arrow_dtype)\n or pa.types.is_date(self.dtype.arrow_dtype)\n or pa.types.is_floating(self.dtype.arrow_dtype)\n or pa.types.is_boolean(self.dtype.arrow_dtype)\n ):\n nan_values = pd.isna(value[key_chunk_indices])\n if any(nan_values):\n nan_index = key_chunk_indices & nan_values\n mask = np.ones_like(arr, dtype=bool)\n mask[nan_index] = False\n pa_arr = pa.array(arr, self.dtype.arrow_dtype, mask=mask)\n all_chunks[ix] = pa_arr\n\n self.data = pa.chunked_array(all_chunks)", "def __getitem__(self, index):\n pass", "def __getitem__(self, index):\n pass", "def _OverloadAllOperators(): # pylint: disable=invalid-name\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n 
ComposedVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ComposedVariable, \"__getitem__\", array_ops._SliceHelperVar)", "def __getitem__(self, idx):\n return self.transform(self.X[idx]), self.y[idx]", "def _setitem_impl(self, index, obj, value):\n obj.set_value(value)\n return obj", "def __setitem__(self, index, value):\n self.elem[index] = value", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def reindex_variables(variables, indexes, indexers, method=None, copy=True):\n # build up indexers for assignment along each index\n to_indexers = {}\n to_shape = {}\n from_indexers = {}\n for name, index in iteritems(indexes):\n to_shape[name] = index.size\n if name in indexers:\n target = utils.safe_cast_to_index(indexers[name])\n indexer = index.get_indexer(target, method=method)\n\n to_shape[name] = len(target)\n # Note pandas uses negative values from get_indexer to signify\n # values that are missing in the index\n # The non-negative values thus indicate the non-missing values\n to_indexers[name] = indexer >= 0\n if to_indexers[name].all():\n # If an indexer includes no negative values, then the\n # assignment can be to a full-slice (which is much faster,\n # and means we won't need to fill in any missing values)\n to_indexers[name] = slice(None)\n\n from_indexers[name] = indexer[to_indexers[name]]\n if np.array_equal(from_indexers[name], np.arange(index.size)):\n # If the indexer is equal to the original index, use a full\n # slice object to speed up selection and so we can avoid\n # unnecessary copies\n from_indexers[name] = slice(None)\n\n def any_not_full_slices(indexers):\n return any(not is_full_slice(idx) for idx in indexers)\n\n def var_indexers(var, indexers):\n return tuple(indexers.get(d, slice(None)) for d in var.dims)\n\n # create variables for the new dataset\n reindexed = OrderedDict()\n for name, var in iteritems(variables):\n if name in indexers:\n # no need to copy, because index data is immutable\n new_var = Coordinate(var.dims, indexers[name], var.attrs,\n var.encoding)\n else:\n assign_to = var_indexers(var, to_indexers)\n assign_from = var_indexers(var, from_indexers)\n\n if any_not_full_slices(assign_to):\n # there are missing values to in-fill\n dtype, fill_value = _maybe_promote(var.dtype)\n shape = tuple(to_shape[dim] for dim in var.dims)\n data = np.empty(shape, dtype=dtype)\n data[:] = fill_value\n # create a new Variable so we can use orthogonal indexing\n # use fastpath=True to avoid dtype inference\n new_var = Variable(var.dims, data, var.attrs, fastpath=True)\n new_var[assign_to] = var[assign_from].values\n elif any_not_full_slices(assign_from):\n # type coercion is not necessary as there are no missing\n # values\n new_var = var[assign_from]\n else:\n # no reindexing is necessary\n # here we need to manually deal with copying data, since\n # we neither created a new ndarray nor used fancy indexing\n new_var = var.copy() if copy else var\n reindexed[name] = new_var\n return reindexed", "def __getitem__(self, item: slice | list[int]) -> \"Parameters\":\n dictionary = {}\n for key, value in asdict(self).items():\n dictionary.update({key: value[item, :]})\n 
return replace(self, **dictionary)", "def __getitem__(self, index):\n return self.input_[index], self.output[index]", "def __getitem__(self, *args):\n return self.data.__getitem__(*args)", "def _map(index: Union[np.ndarray, pd.Int64Index], map: Dict[int, int]) -> np.ndarray:\n # About 4x faster than .to_series().apply(lambda).\n # About 20x faster than to_series().replace().\n values = index.to_numpy()\n result = values.copy()\n for k, v in map.items():\n result[values == k] = v\n return result", "def invert_assignment(self, idx2_wxs, idx2_maws, *other_idx2_prop):\n # Invert mapping -- Group by word indexes\n idx2_nAssign = [len(wxs) for wxs in idx2_wxs]\n jagged_idxs = [[idx] * num for idx, num in enumerate(idx2_nAssign)]\n wx_keys, groupxs = vt.jagged_group(idx2_wxs)\n idxs_list = vt.apply_jagged_grouping(jagged_idxs, groupxs)\n wx2_idxs = dict(zip(wx_keys, idxs_list))\n maws_list = vt.apply_jagged_grouping(idx2_maws, groupxs)\n wx2_maws = dict(zip(wx_keys, maws_list))\n\n other_wx2_prop = []\n for idx2_prop in other_idx2_prop:\n # Props are assumed to be non-jagged, so make them jagged\n jagged_prop = [[prop] * num for prop, num in zip(idx2_prop, idx2_nAssign)]\n prop_list = vt.apply_jagged_grouping(jagged_prop, groupxs)\n wx2_prop = dict(zip(wx_keys, prop_list))\n other_wx2_prop.append(wx2_prop)\n if ut.VERBOSE:\n print('[smk_index.assign] L___ End Assign vecs to words.')\n assignment = (wx2_idxs, wx2_maws) + tuple(other_wx2_prop)\n return assignment", "def __getitem__(self, index):\n if self.data is None:\n raise IndexError()\n\n if self.cache and index in self.cached_data:\n return self.cached_data[index]\n\n ex = self.data[index]\n if len(self.transform_hooks) > 0:\n ret = []\n for field, cols in self.transform_hooks:\n _ex = ex[cols]\n if isinstance(cols, List):\n processed_ex = field.process(*_ex)\n else:\n processed_ex = field.process(_ex)\n\n if isinstance(processed_ex, tuple):\n ret.extend(processed_ex)\n else:\n ret.append(processed_ex)\n ret = tuple(ret)\n else:\n ret = tuple(ex)\n\n if self.cache:\n self.cached_data[index] = ret\n\n return ret", "def __getitem__(self, index):\n raise NotImplementedError", "def __getitem__(self, index):\n raise NotImplementedError", "def _exch(self, ix_1, ix_2):\n tmp = self._vals[ix_1]\n self._vals[ix_1] = self._vals[ix_2]\n self._vals[ix_2] = tmp", "def __setitem__(self, i, v):\n # The policy function can't be modified", "def setitem(ary, loc, value):\n\n if not isinstance(loc, tuple):\n loc = (loc,)\n\n # Let's try to convert non-arrays and non-scalars to an array\n # e.g. 
converting a python list to an array\n if not (bhary.check(value) or np.isscalar(value)):\n value = array_create.array(value)\n\n # Lets make sure that not all dimensions are indexed by integers\n loc = list(loc)\n if len(loc) == ary.ndim and all((np.isscalar(s) for s in loc)):\n # 'slice' doesn't support negative start index\n if loc[0] < 0:\n loc[0] += ary.shape[0]\n loc[0] = slice(loc[0], loc[0] + 1)\n\n # Copy the 'value' to 'ary' using the 'loc'\n if ary.ndim == 0:\n assign(value, ary)\n else:\n assign(value, ary[tuple(loc)])", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n return [self._main[key] for key in self._main._sequence[index]]\n else:\n return self._main[self._main._sequence[index]]", "def _OverloadAllOperators(): # pylint: disable=invalid-name\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n ZfitBaseVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ZfitBaseVariable, \"__getitem__\", array_ops._SliceHelperVar)", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __getitem__(self, i):\n new_data = super().__getitem__(i)\n if isinstance(i, slice):\n new_data = self.__class__(new_data)\n new_data.global_settings = copy.copy(self.global_settings)\n return new_data", "def __setitem__(self, idx, val):\n self.rows[idx[0]][idx[1]] = val", "def __setitem__(self):\n raise ValueError(\"Dataset objects are immutable\")", "def __setitem__(self, item_index: Index, new_item: Item) -> None:\n raise NotImplementedError(\"__setitem__\")", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n # fetching a slice returns an OrderedDict\n return self._main[index].items()\n key = self._main._sequence[index]\n return (key, self._main[key])", "def __setitem__(self, index_tuple, value):\n assert len(index_tuple) == 2, \"Invalid number of array subscripts.\"\n row, col = index_tuple\n assert 0 <= row < self.num_rows() and 0 <= col < self.num_cols(), \\\n \"Array subscript out of range.\"\n array_1d = self.rows[row]\n array_1d[col] = value", "def _assign_op(dest, op, arg, val, path, scope):\n if op == '[':\n dest[arg] = val\n elif op == '.':\n setattr(dest, arg, val)\n elif op == 'P':\n _assign = scope[TargetRegistry].get_handler('assign', dest)\n try:\n _assign(dest, arg, val)\n except Exception as e:\n raise PathAssignError(e, path, arg)\n else: # pragma: no cover\n raise ValueError('unsupported T operation for assignment')", "def __getitem__(self,key):\n return self.x[key]", "def affect_model(model, lateral_index, lateral_value):\n model[lateral_index] = lateral_value\n return model", "def __getitem__(self, index):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._getitem2d(index)\n elif self.dim == 3:\n return self._getitem3d(index)", "def __setitem__(self, key, item):\n assert isinstance(key,list) and isinstance(item,list) and len(key)==2 and len(item)==2\n self._data[self.__ptBin(key[0])][self.__etaBin(key[1])] = item", "def __getitem__(self, inds):\n i, j = inds\n return self.array[i][j]", "def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = 
irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)", "def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def __getitem__(self, item):\n u, v = item\n return self.__getitem(u, v)", "def __setitem__(self, index, value):\n self._update_value_at(index, value)", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def __setitem__(self, *args):\n return _osgAnimation.vectorMatrixKeyframe___setitem__(self, *args)", "def convert_input(self, indexed_input: JsonDict,\n model_output: JsonDict) -> JsonDict:\n c = copy.copy(indexed_input) # shallow copy\n c[\"data\"] = {\"x\": model_output[self._field_name]}\n return c", "def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n ops = self.operator_form(index)\n op0 = self.args[0].operator_dict(index, vars, **kw)\n for var in op0:\n out[var] = ops * op0[var]\n return out", "def __getitem__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:\n mask = self.groupby_object._make_mask_from_positional_indexer(arg)\n return self.groupby_object._mask_selected_obj(mask)" ]
[ "0.628359", "0.6214197", "0.5985369", "0.5843468", "0.56949717", "0.5647313", "0.56271034", "0.5615518", "0.55904377", "0.5575391", "0.5575391", "0.5575391", "0.5575391", "0.55560446", "0.5552748", "0.5485441", "0.5474524", "0.54711777", "0.54630387", "0.54482377", "0.5441601", "0.5441004", "0.5433495", "0.542441", "0.53740263", "0.5368299", "0.5360136", "0.5344604", "0.5343106", "0.5335126", "0.53336906", "0.53259856", "0.53258866", "0.5318966", "0.5315597", "0.53109133", "0.53067166", "0.5303219", "0.5299736", "0.52643853", "0.52606726", "0.52590424", "0.5257772", "0.52328336", "0.5229233", "0.5229233", "0.51927096", "0.5185136", "0.5174272", "0.5167976", "0.5140058", "0.51263285", "0.512153", "0.51034594", "0.5101385", "0.5101385", "0.50766695", "0.50756764", "0.50750065", "0.5074466", "0.5071859", "0.5071676", "0.50682324", "0.50653625", "0.5062952", "0.50544196", "0.505368", "0.504948", "0.50443566", "0.50443566", "0.50365174", "0.5031439", "0.5031272", "0.50244254", "0.5023841", "0.50238377", "0.50238377", "0.50238377", "0.5022286", "0.5018597", "0.5018062", "0.50177675", "0.5014141", "0.5006745", "0.5003336", "0.49998218", "0.49986607", "0.49984118", "0.4986043", "0.49744806", "0.49591014", "0.49580616", "0.494687", "0.49420688", "0.49352637", "0.4926351", "0.49132296", "0.4913021", "0.49114892", "0.49085104" ]
0.6089785
2
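The record above documents Paddle's in-place assignment path, which splits basic indexing (lowered to the set_value op, with an inplace version bump in dynamic mode) from advanced indexing (a slice is taken, transposed, updated via index_put, transposed back, and written with set_value). A minimal user-side sketch of the two code paths, assuming a recent PaddlePaddle build in which list-index assignment is supported; the shapes and values are illustrative only:

    import paddle

    x = paddle.zeros([4, 3])

    # Basic indexing: integers and slices only, handled by the set_value op.
    x[0:2, :] = 1.0

    # Advanced indexing: a list index routes through the index_put path
    # (slice, transpose, index_put, transpose back, set_value).
    x[[0, 2], 1] = paddle.to_tensor([5.0, 6.0])

    print(x.numpy())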
Whether the AST node can be safely evaluated twice.
def can_reevaluate(self, node):
    return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \
        (six.PY3 and isinstance(node, ast.Bytes)) or \
        (ast_has_name_constant and isinstance(node, ast.NameConstant))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evil_hack(self, other):\n if isinstance(other, FExpr):\n return other == self\n return isinstance(other, self.__class__) and self.id == other.id", "def can_rewrite(self, lhs):\n return len(self[lhs]) > 0", "def is_used_as_expression(item):\n # note: this is not accurate because of the last statement of a program\n # but intended\n return not is_used_as_statement(item)", "def has_right(self):\n return self.right != None", "def has_right(self):\n return self.__right != None", "def has_duplicated_literal(head: Atom, body: Body) -> bool:\n return len(body) != len(set(body.get_literals()))", "def internal(self):\n if self._leftchild or self._rightchild:\n return True\n return False", "def hasTwoSons(self):\n \n return self._leftSon is not None and self._rightSon is not None", "def check_for_right(self) -> bool:\n\t\tboolean_expression_has_right = False\n\t\texpression_has_right = False\n\t\tif self.boolean_expression:\n\t\t\tboolean_expression_has_right = self.boolean_expression.check_for_right()\n\t\tif self.expression:\n\t\t\texpression_has_right = self.expression.check_for_right()\n\t\treturn boolean_expression_has_right or expression_has_right", "def only_once(self) -> bool:\n return self.times == 1", "def is_true(node):\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)", "def has_expression(self):\n return self._expression is not None", "def has_right(self):\n return self.r is not None", "def _seen(node):\n\t\tcheck = linked_list\n\t\twhile check != node:\n\t\t\tif check.value == node.value:\n\t\t\t\treturn True\n\t\t\tcheck = check.next\n\t\treturn False", "def _has_right(self, index):\r\n return self._right(index) < len(self)", "def _isImmediatelyConcurrentWithHelper(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n offsetsOfPlusOne = 0\n offsetsOfMinusOne = 0\n equalities = 0\n for id in self.clock.keys():\n if (self.clock[id] + 1) == other.clock[id]:\n offsetsOfPlusOne += 1\n if (self.clock[id] - 1) == other.clock[id]:\n offsetsOfMinusOne += 1\n elif self.clock[id] == other.clock[id]:\n equalities += 1\n if offsetsOfPlusOne == 1 and offsetsOfMinusOne == 1 and equalities == len(self.clock.keys()) - 2:\n return True\n else:\n return False", "def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1", "def __call__(self, first: Node, second: Node) -> bool:\n if not (is_next(first, second) and self._compare_attributes(first, second)):\n self.accumulated_axes = set()\n return False\n\n fst_axes = set([a for a in Interpolate.get_axes(first)])\n snd_axes = set([a for a in Interpolate.get_axes(second)])\n\n self.accumulated_axes = self.accumulated_axes | fst_axes\n\n # If the set of accumulated axes and the set of axes of 'second' do not intersect then nodes can be fused,\n # because interpolations with respect to various axes do not affect each other.\n if not(self.accumulated_axes & snd_axes):\n return True\n\n # Otherwise, nodes cannot be fused.\n self.accumulated_axes = set()\n return False", "def has_side_effect(self):\n # XXX Need to handle OpExtInst correctly (it is conservative now)\n if self.result_id is None:\n return True\n return self.op_name in spirv.HAS_SIDE_EFFECT", "def done(self):\n return self.left + 1 == self.right", "def semileaf(self):\n if self._leftchild and not self._rightchild:\n return True\n if self._rightchild and not self._leftchild:\n return True\n return False", "def _aresame(a, b):\n from .numbers import Number\n from .function import AppliedUndef, UndefinedFunction as 
UndefFunc\n if isinstance(a, Number) and isinstance(b, Number):\n return a == b and a.__class__ == b.__class__\n for i, j in zip_longest(_preorder_traversal(a), _preorder_traversal(b)):\n if i != j or type(i) != type(j):\n if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or\n (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):\n if i.class_key() != j.class_key():\n return False\n else:\n return False\n return True", "def has_next(self):\n return self._mu is not None or self._source.has_next()", "def __bool__(self):\n return len(self.atoms) >= 1", "def is_semileaf(self):\n if self._leftchild and self._rightchild:\n return False\n if not self._leftchild and not self._rightchild:\n return False\n return True", "def nodes_are_equal(node1, node2):\n\n try:\n return dump_ast(node1).strip() == dump_ast(node2).strip() and \\\n node1.lineno == node2.lineno and \\\n node1.col_offset == node2.col_offset\n except:\n return False", "def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value", "def _ast_node_is_in_docstring_position(ast_node):\n if not isinstance(ast_node, (ast.Str, Bytes)):\n raise TypeError\n expr_node = ast_node.context.parent\n if not isinstance(expr_node, ast.Expr):\n return False\n assert ast_node.context.field == 'value'\n assert ast_node.context.index is None\n expr_ctx = expr_node.context\n if expr_ctx.field != 'body':\n return False\n parent_node = expr_ctx.parent\n if not isinstance(parent_node, (ast.FunctionDef, ast.ClassDef, ast.Module, AsyncFunctionDef)):\n return False\n if expr_ctx.index == 0:\n return True\n prev_sibling_node = parent_node.body[expr_ctx.index-1]\n if isinstance(prev_sibling_node, ast.Assign):\n return True\n return False", "def even(self):\n return self._ % 2 == 0", "def has_logical_equivalent(self, node):\n return node.name in logical_equivalents", "def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % self.fp != 0", "def is_false(node):\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)", "def checkAstIntegrity(instruction):\n try:\n for se in instruction.getSymbolicExpressions():\n str(se.getAst())\n\n for x, y in instruction.getLoadAccess():\n str(y)\n\n for x, y in instruction.getStoreAccess():\n str(y)\n\n for x, y in instruction.getReadRegisters():\n str(y)\n\n for x, y in instruction.getWrittenRegisters():\n str(y)\n\n for x, y in instruction.getReadImmediates():\n str(y)\n\n return True\n\n except:\n return False", "def _should_try_reoptimize(self, last_statistics_refresh_time: timedelta, last_event: Event):\n if self.__is_simultaneous_state:\n return False\n return super()._should_try_reoptimize(last_statistics_refresh_time, last_event)", "def has_crossing_len2_ob(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (scell, fcell)),\n ]\n else:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (fcell, scell)),\n ]\n return any(ob in possible_obs for ob in self._tiling.obstructions)", "def requires_safe_render(self) -> bool:\n return True\n # return any(is_reserved(child.name) for child in self.children)", "def isScalene(self):\n\t\treturn self.a != self.b != self.c", "def is_internal(self):\n # TODO: Check if either left child or right child has a value\n return ... 
or ...", "def contains(self, node):\n return node == self.__node_a or node == self.__node_b", "def is_valid_block(self, first):\n return (self.a_cursor > first.a and\n self.b_cursor > first.b)", "def __eq__(self, other):\n return (other is self) or (isinstance(other, Expr)\n and self.op == other.op and self.args == other.args)", "def has_node(self, n):\n return n in self.node_dict", "def identical_to(self, elem):\n\n return (self.n1 == elem.n1) and (self.n2 == elem.n2)", "def __contains__(self, x):\n # if not isinstance(x, int) or not x % 2:\n if not (isinstance(x, int) and (x % 2)):\n return False\n return True", "def isRight(self):\n\t\tif self.sq(self.a) == self.sq(self.b) + self.sq(self.c):\n\t\t\treturn True\n\t\telif self.sq(self.b) == self.sq(self.a) + self.sq(self.c):\n\t\t\treturn True\n\t\telif self.sq(self.c) == self.sq(self.a) + self.sq(self.b):\n\t\t\treturn True\n\t\treturn False", "def compare(self, node) -> bool:\n\t\t# No conflicts, Return True\n\t\treturn True", "def syntax_check(node_compressed, sub_info, partial=False):\n # default argument for sub_info: empty_sub_info = (np.array([], dtype=int), np.array([], dtype=int), 1000000000000000000)\n # Expand subroutines\n node = expand_to_atoms(node_compressed, sub_info)\n in_loop = False\n current_loop_has_1 = False\n current_loop_has_14 = False\n for inst in node:\n if inst == 25:\n if not in_loop:\n in_loop = True\n else:\n return False\n elif inst == 26:\n if in_loop and current_loop_has_1 and current_loop_has_14:\n in_loop = current_loop_has_1 = current_loop_has_14 = False\n else:\n return False\n elif inst == 1 and in_loop:\n current_loop_has_1 = True\n elif inst == 14 and in_loop:\n current_loop_has_14 = True\n elif inst == 16 and in_loop:\n return False\n return not in_loop or partial", "def fingertip_no_recompute(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_recompute\", False)", "def hasVeryTrustedValue(self):\n return self.subnode_source.hasVeryTrustedValue()", "def is_permission_already_granted(self):\n return self._tag == 'permission_already_granted'", "def _has(self, ast, label):\n return len(self._find_all(ast, label, max_results=1)) == 1", "def is_self_member_ref(memberexpr: MemberExpr) -> bool:\n # TODO: Merge with is_self_member_ref in semanal.py.\n if not isinstance(memberexpr.expr, NameExpr):\n return False\n node = memberexpr.expr.node\n return isinstance(node, Var) and node.is_self", "def __eq__(self, node):\n if node == None or self.element != node.element:\n return False\n return self.index == node.index", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())", "def has_multiple_roots(self):\n root = self.left_root\n if root != NULL and self.right_sib(root) != NULL:\n return True\n return False", "def _has_right(self, j):\n return self._right(j) < len(self._data)", "def time_invariant(self, expr=None):\n if expr is None:\n return all(self.time_invariant(v) for v in self.values())\n\n if any(i in expr.free_symbols for i in self.time_indices):\n return False\n queue = [expr.rhs] if expr.is_Equality else [expr]\n while queue:\n item = queue.pop()\n temporaries = []\n for i in retrieve_terminals(item):\n if any(j in i.free_symbols for j in self.time_indices):\n # Definitely not time-invariant\n return False\n if i in self:\n # Go on with the search\n temporaries.append(i)\n elif isinstance(i, Dimension):\n # Go on with the search, as /i/ is not 
a time dimension\n continue\n elif not i.base.function.is_SymbolicData:\n # It didn't come from the outside and it's not in self, so\n # cannot determine if time-invariant; assume time-varying\n return False\n queue.extend([self[i].rhs for i in temporaries if self[i].rhs != item])\n return True", "def __eq__(self, other):\n if not isinstance(other, Expression):\n return False\n\n return self.evaluate() == other.evaluate()", "def test_RestrictingNodeTransformer__visit_IsNot__1():\n assert restricted_eval('2 is not None') is True", "def is_equivalence(self) -> bool:", "def has_node(self, n):\n return n in self.dict", "def _optimized(self):\n return False", "def hasCycle(self, head: ListNode) -> bool:\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow: return True\n return False", "def _is_equal_to_atom(self, atom):\n\n return False", "def _like_rnncell(cell):\n conditions = [hasattr(cell, \"output_size\"), hasattr(cell, \"state_size\"),\n hasattr(cell, \"zero_state\"), callable(cell)]\n return all(conditions)", "def _should_eval(self):\n return False", "def isfalse(self):\n return len(self.literals) == 0", "def is_nf(self):\n return (\n self.depth <= 2 and\n all(isinstance(arg, Literal) or isinstance(arg, self.DUAL)\n for arg in self._args)\n )", "def second_identity_law(x):\n return second(id_, x) == id_(x)", "def static_or_private(self) -> bool:\n # skip plain assignments\n if isinstance(self.definition, _ast.Assign):\n return False\n # skip plain pass expressions\n elif isinstance(self.definition, _ast.Pass):\n return False\n # skip plain expressions\n elif isinstance(self.definition, _ast.Expr):\n return False\n\n try:\n # get list of method decorators\n decorators = self.definition.decorator_list\n # iterate over decorators\n for item in decorators:\n # if 'staticmethod' in decorator list\n if item.id == 'staticmethod':\n return True\n except AttributeError:\n pass\n\n try:\n # check for '_' and '__' starts chars\n if self.definition.name.startswith('_') or self.definition.name.startswith('__'):\n # skip magic methods\n if not self.definition.name.endswith('__'):\n return True\n except AttributeError:\n raise\n\n return False", "def is_exhausted(self):\n return self.root.is_exhausted", "def has_lone_atom(self) -> bool:\n return self._has_lone_atom()", "def _eq(a, b):\n return (a - b) % 2 == 0", "def is_valid_receiver(expr: Expr) -> bool:\r\n return type(expr) in [Variable, Index, Attribute]", "def is_cyclic(self):\n return self._.b[0] == 2 and self._.c[-1] in [1, 2] and \\\n all(x == 1 for x in self._.b[1:-1] + self._.c[1:-1])", "def check_if_double(tile: list):\n return tile[0] == tile[1]", "def fn(n1, n2):\n if not n1 or not n2: return n1 is n2\n return n1.val == n2.val and (fn(n1.left, n2.right) and fn(n1.right, n2.left) or fn(n1.left, n2.left) and fn(n1.right, n2.right))", "def isValidRelaxed(cls,root):\n valid = True\n # no anonymous entities allowed\n for a in root.getiterator(\"reltoken\"):\n if len(a)==0 and a.attrib['relaxed_tag']==\"entity\":\n printError(cls,inspect.stack()[1][3],\"Reltoken of physical type with no nested subtokens\")\n valid = False\n return(valid)", "def is_internal(self):\n if self.is_leaf() or self.is_semileaf():\n return False\n return True", "def test_RestrictingNodeTransformer__visit_Is__1():\n assert restricted_eval('None is None') is True", "def resolve_to_true(self):\n print(colored(f\"Checking {self}\\n\", attrs=['bold', 'underline']))\n for elem in self.operands:\n # print(f\"Checking elem 
{elem}\")\n if not elem.resolve_to_true():\n print(colored(f\"Since {elem} is False then {self} is False\\n\", attrs=[\n 'bold', 'underline']))\n return False\n print(colored(f\"{self} is True !\\n\", attrs=['bold', 'underline']))\n return True", "def is_one(self, a):\n return a == self.one", "def single_pos_no_conflict_check(position, R):\n if position in pos0:\n return False\n for step in R[:-2]:\n if position in step:\n return False\n return True", "def __has_frequency_one(self):\n return self.head.next.key == 1", "def __check_node_is_already_visited(self, node_name: str) -> bool:\r\n return next((True for visited_node in self.__priority_queue if visited_node.node_name == node_name), False)", "def double(self):\n if self.__valeur1 == self.__valeur2:\n return True\n else:\n return False", "def do_is(op_left, op_right):\n if isa(op_left, float) and isa(op_right, float):\n return op_left == op_right\n return op_left is op_right", "def __ne__(self, other: 'NextHref') -> bool:\n return not self == other", "def fn(node, x):\n if not node: return False \n if not node.left and not node.right: return node.val == x\n return fn(node.left, x-node.val) or fn(node.right, x-node.val)", "def _has_right(self, j):\n return (2 * j + 2) < len(self)", "def is_inequality(self): \n return False", "def _pre_check(self) -> bool:\n if self._fuse_row:\n rows = (\n self._tiling.cells_in_row(self._row_idx),\n self._tiling.cells_in_row(self._row_idx + 1),\n )\n else:\n rows = (\n self._tiling.cells_in_col(self._col_idx),\n self._tiling.cells_in_col(self._col_idx + 1),\n )\n has_a_long_row = any(len(row) > 1 for row in rows)\n if has_a_long_row:\n return False\n first_cell = next(iter(rows[0]))\n second_cell = next(iter(rows[1]))\n cells_are_adjacent = (\n first_cell[0] == second_cell[0] or first_cell[1] == second_cell[1]\n )\n if not cells_are_adjacent:\n return False\n same_basis = (\n self._tiling.cell_basis()[first_cell][0]\n == self._tiling.cell_basis()[second_cell][0]\n )\n if not same_basis:\n return False\n self._first_cell = first_cell\n self._second_cell = second_cell\n return True", "def node_should_be_modified(self, node):\n is_tf_constructor = matching.matches_name_or_namespaces(node, \"TensorFlow\", TF_NAMESPACES)\n return is_tf_constructor and self._has_script_mode_param(node)", "def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False", "def has_single_root(self):\n root = self.left_root\n if root != NULL and self.right_sib(root) == NULL:\n return True\n return False", "def __nonzero__(self):\n return self.has_prev or self.has_next", "def is_over(self, state: StonehengeState) -> bool:\n total_result = state.hori_result + state.left_result + state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n # all_taken = True\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item =='2':\n p2_taken += 1\n # else:\n # all_taken = False\n # print('p1 taken:' + str(p1_taken))\n # print('p2 taken:' + str(p2_taken))\n # print('p1_taken more than half?')\n # print(float(p1_taken) >= total_line/2)\n # print('p2_taken more than half?')\n # print(float(p2_taken) >= total_line/2)\n return float(p1_taken) >= total_line/2 or float(p2_taken) >= total_line/2", "def over(self):\n return self.result is not None", "def __nonzero__(self):\n if self.__fragments:\n return True\n return False", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0" ]
[ "0.5983335", "0.5781899", "0.5668821", "0.5606019", "0.5595086", "0.556334", "0.5537646", "0.55246955", "0.542583", "0.5423907", "0.5420764", "0.54188806", "0.5378163", "0.5366631", "0.53475344", "0.53304714", "0.5311328", "0.5301695", "0.5297375", "0.52919585", "0.5287243", "0.52673966", "0.52544343", "0.5246898", "0.5238788", "0.52095157", "0.51992935", "0.5183977", "0.5178811", "0.514904", "0.51450336", "0.5133796", "0.5126904", "0.51139563", "0.510572", "0.51027036", "0.51014704", "0.5099092", "0.5099044", "0.5098985", "0.5071336", "0.50616944", "0.5061525", "0.50593823", "0.50533366", "0.5047715", "0.5037247", "0.5036167", "0.5030117", "0.5016827", "0.50132036", "0.5012832", "0.50097775", "0.50016004", "0.50008875", "0.499838", "0.49968547", "0.49915236", "0.4989146", "0.4988087", "0.4983892", "0.4983578", "0.49775887", "0.49729258", "0.49652663", "0.4963927", "0.49630678", "0.4962868", "0.49562356", "0.49516836", "0.49484646", "0.49461704", "0.49353176", "0.49285808", "0.49274266", "0.49219126", "0.49198717", "0.49179834", "0.49174875", "0.49149394", "0.4909812", "0.4903923", "0.49026668", "0.49021342", "0.48978314", "0.4897342", "0.48971373", "0.48968184", "0.48959723", "0.48947078", "0.48933983", "0.48879793", "0.4883944", "0.48819658", "0.48713714", "0.48703438", "0.4869969", "0.48655307", "0.48645106", "0.48636973" ]
0.6013396
0
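The can_reevaluate document above whitelists AST nodes with no side effects (names and literal nodes; Num, Str, Bytes, and NameConstant are folded into ast.Constant on modern Python), so a rewriter can duplicate them instead of spilling their value to a temporary. A self-contained sketch of that pattern on Python 3.9+, with the six/Python-2 handling dropped as a simplifying assumption; the AugAssignExpander class is hypothetical and only illustrates how the check gates a rewrite:

    import ast

    def can_reevaluate(node):
        # Names and constant literals evaluate to the same value every time,
        # so duplicating them in the rewritten tree is safe.
        return isinstance(node, (ast.Name, ast.Constant))

    class AugAssignExpander(ast.NodeTransformer):
        # Rewrite "x += y" into "x = x + y" only when the target can be
        # re-evaluated without side effects.
        def visit_AugAssign(self, node):
            if not can_reevaluate(node.target):
                return node
            load_target = ast.Name(id=node.target.id, ctx=ast.Load())
            new = ast.Assign(
                targets=[node.target],
                value=ast.BinOp(left=load_target, op=node.op, right=node.value),
            )
            return ast.copy_location(new, node)

    tree = ast.parse("x += 1")
    tree = ast.fix_missing_locations(AugAssignExpander().visit(tree))
    print(ast.unparse(tree))  # x = x + 1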
Convert AST operator to function in operator module.
def op_to_function(self, op):
    name = op.__class__.__name__.lower()
    return to_attribute(self.operator, inplace_operator_table[name])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def to_operator(operator):\n if isinstance(operator, str):\n return ValueConstraintOperators.STRING_OPERATOR_MAP[operator]\n else:\n return operator", "def convert(self, operator: OperatorBase) -> OperatorBase:\n # pylint: disable=cyclic-import,import-outside-toplevel\n from ..evolutions.evolved_op import EvolvedOp\n\n if isinstance(operator, ListOp):\n if isinstance(operator, SummedOp) and all([isinstance(op, PauliOp)\n for op in operator.oplist]):\n # For now, we only support graphs over Paulis.\n return self.group_subops(operator)\n elif self._traverse:\n return operator.traverse(self.convert)\n else:\n return operator\n elif isinstance(operator, OperatorStateFn) and self._traverse:\n return OperatorStateFn(self.convert(operator.primitive),\n is_measurement=operator.is_measurement,\n coeff=operator.coeff)\n elif isinstance(operator, EvolvedOp) and self._traverse:\n return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff)\n else:\n return operator", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def to_op(self):\n raise NotImplementedError", "def run_operator(scope_node, node, name, op, code, f_globals):\n operators = __get_operators()\n if op not in operators:\n raise TypeError(\"failed to load operator '%s'\" % op)\n scope_key = scope_node.scope_key\n pair = operators[op](code, 
scope_key, f_globals)\n if isinstance(name, tuple):\n # The template inst binding with a single name will take this\n # path by using a length-1 name tuple. See bug #78.\n bind_extended_member(node, name, pair, scope_key)\n else:\n item = getattr(node.klass, name, None)\n if isinstance(item, Alias):\n bind_aliased_member(node, name, item, pair, scope_key)\n else:\n # This is the path for a standard binding on a child def.\n # It does not need the closure scope key. See bug #78.\n bind_member(node, name, pair)", "def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op", "def rhs_as_python_func(self, namespace=None):\n namespace = namespace or {}\n\n return eval(\"lambda %s: %s\" % (','.join(self.rhs_names), self.rhs),\n str_to_npfunc_map, namespace)\n # math_namespace.namespace, namespace)", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")", "def get_fermion_operator(operator):\n fermion_operator = FermionOperator()\n\n if isinstance(operator, PolynomialTensor):\n for term in operator:\n fermion_operator += FermionOperator(term, operator[term])\n return fermion_operator\n\n raise TypeError(\"Unsupported type of oeprator {}\".format(operator))", "def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast", "def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in 
cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)", "def translate(expr):\n return from_python(ast.parse(expr))", "def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))", "def convert_exp(node, **kwargs):\n return create_basic_op_node('Exp', node, kwargs)", "def apply(expr, fun_annotate_subexpr = None):\n assert isinstance(expr, Expression)\n t = type(expr)\n if t is Op:\n try:\n pre, suff = ExprTranslator.OPS_TO_SMTLIB[expr.id]\n return ExprTranslator.subexpr_to_smtlib(expr, pre, suff, fun_annotate_subexpr)\n except KeyError:\n raise Exception(str(expr.id) + ': operation not supported!')\n\n elif t is Var:\n return expr.get_text()\n elif t is ConstInt or t is ConstBool or t is ConstReal:\n return str(expr.get_text())\n elif t is ExprHole:\n return expr.hole_decl.get_function_call()\n else:\n raise Exception(str(t)+': expression type not supported!')", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)", "def funcOpExchange(expstr):\n funcOpDict = expr.getFuncOpDict() \n for funcstr in funcOpDict:\n idx = expstr.find(funcstr)\n if idx >= 0:\n #if we find a function string at idx\n if (idx == 0 or not expstr[idx-1].isalpha()) and expstr[idx+len(funcstr)] == '(':\n fstart = idx\n fstop = 0\n rest = expstr[idx:]\n pdepth = 0\n for i,c in enumerate(rest):\n if c == '(':\n pdepth += 1\n if c == ')':\n pdepth -= 1\n if pdepth == 0:\n fstop = idx+i+1\n break\n start = expstr[:fstart]\n middle = expstr[fstart:fstop]\n end = expstr[fstop:]\n args = ['('+funcOpExchange(exp)+')' for exp in funcargs(middle)]\n if len(args) == 1:\n args.append('0')\n expstr = start+funcOpDict[funcstr].join(args)+funcOpExchange(end)\n return expstr", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif 
expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, 
NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors", "def convert(ast):\n\n if ast and ast.type == \"Function\":\n # Activity function conversion\n if (\n ast.name != \"molecularActivity\"\n and ast.name in belspec[\"namespaces\"][\"Activity\"][\"list\"]\n ):\n print(\"name\", ast.name, \"type\", ast.type)\n ast = convert_activity(ast)\n return ast # Otherwise - this will trigger on the BEL2 molecularActivity\n\n # translocation conversion\n elif ast.name in [\"tloc\", \"translocation\"]:\n ast = convert_tloc(ast)\n\n fus_flag = False\n for idx, arg in enumerate(ast.args):\n if arg.__class__.__name__ == \"Function\":\n\n # Fix substitution -> variation()\n if arg.name in [\"sub\", \"substitution\"]:\n ast.args[idx] = convert_sub(arg)\n\n elif arg.name in [\"trunc\", \"truncation\"]:\n ast.args[idx] = convert_trunc(arg)\n\n elif arg.name in [\"pmod\", \"proteinModification\"]:\n ast.args[idx] = convert_pmod(arg)\n\n elif arg.name in [\"fus\", \"fusion\"]:\n fus_flag = True\n\n # Recursively process Functions\n ast.args[idx] = convert(ast.args[idx])\n\n if fus_flag:\n ast = convert_fus(ast)\n\n return ast", "def opsplit(expstr):\n\n #ops are the one char operators (sorted on precidence)\n ops = expr.getOps()\n #Remove outer parentesis if we have them\n if expstr[0] == '(' and expstr[-1] == ')' and balanced(expstr[1:-1]):\n expstr = expstr[1:-1]\n #Add a '0' to the beginning of the string if we start with an operator\n if expstr[0] in ops:\n expstr = '0'+expstr\n for op in ops:\n pc = 0\n cc = len(expstr)-1\n revexpstr = list(expstr)\n revexpstr.reverse()\n #Search for the operator backwards (to preserve operator presidence)\n for c in revexpstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if c == op and pc == 0:\n #Build the tree recursively\n return [op,opsplit(expstr[:cc]),opsplit(expstr[cc+1:])]\n cc -=1\n #if we find something that looks like a function, parse it separately \n if funcpattern(expstr):\n fnamestr = funcname(expstr)\n fargs = funcargs(expstr)\n farglist = [opsplit(arg) for arg in fargs]\n return [fnamestr]+farglist\n return expstr", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def evaluate(node,operators):\n\tif isinstance(node, ast.Num):\n\t\treturn node.n\n\telif isinstance(node, ast.BinOp):\n\t\treturn operators[type(node.op)](evaluate(node.left,operators), evaluate(node.right,operators))\n\telif isinstance(node, ast.UnaryOp):\n\t\treturn operators[type(node.op)](evaluate(node.operand,operators))\n\telse:\n\t\traise TypeError(node)", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ZfitBaseVariable, operator, _run_op)", "def convert(value):\n if isinstance(value, (Function, NodeBase)):\n return value\n\n if callable(value):\n return _convert_tvm_func(value)\n\n return _convert_to_node(value)", "def convert_elemwise(self, op):\n try:\n from 
tflite.Operator import Operator\n from tflite.AddOptions import AddOptions\n from tflite.SubOptions import SubOptions\n from tflite.MulOptions import MulOptions\n from tflite.DivOptions import DivOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) == 2, \"input tensors length should be 2\"\n\n def get_input_nodes(tensor):\n if tensor.tensor_idx in self.tensor_tab:\n # In most cases, we can assume that TOCO fuses elemwise operators\n # with constants - it means both will be tensors.\n return self.tensor_tab[tensor.tensor_idx]\n else:\n # However, in some corner cases, the elemwise operator is not fused,\n # we can receive as constant.\n t_value = self.get_tensor_value(tensor)\n return self.nn_new_const(tensor, t_value)\n\n lhs_nodes = get_input_nodes(input_tensors[0])\n rhs_nodes = get_input_nodes(input_tensors[1])\n\n assert len(lhs_nodes) in [1, 3], \"Nodes list size should be 1 or 3\"\n assert len(lhs_nodes) == len(rhs_nodes), \"Left and right nodes list size should be equal\"\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n # Options (fused_activation_function)\n options = None\n if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:\n op_type = \"Add\"\n options = AddOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:\n op_type = \"Sub\"\n options = SubOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:\n op_type = \"Mul\"\n options = MulOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:\n op_type = \"Div\"\n options = DivOptions()\n\n if options is not None:\n op_options = op.BuiltinOptions()\n options.Init(op_options.Bytes, op_options.Pos)\n fused_activation_fn = options.FusedActivationFunction()\n # if we have activation fn\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Elemwise operators with fused activation are not supported yet.'\n\n out_nodes = self.nn_elemwise(lhs_nodes, rhs_nodes, op_type, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ComposedVariable, operator, _run_op)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return 
SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))", "def str_to_operator(s):\n return {\n # https://docs.python.org/3/library/operator.html#mapping-operators-to-functions\n \"<\": operator.lt,\n \"<=\": operator.le,\n \"==\": operator.eq,\n \"!=\": operator.ne,\n \">=\": operator.ge,\n \">\": operator.gt,\n }[s]", "def eval(node):\n if node.id == '(literal)':\n return node.value\n elif node.id == '(name)':\n return scope[node.value]\n elif node.id == '(':\n name, args = node.children\n name = eval(name)\n args = map(eval, args)\n return name(*args)\n elif node.id == 'and':\n assert len(node.children) == 2\n first = eval(node.children[0])\n if first:\n return eval(node.children[1])\n else:\n return first\n elif node.id == 'or':\n assert len(node.children) == 2\n first = eval(node.children[0])\n if first:\n return first\n else:\n return eval(node.children[1])\n elif node.id == 'not':\n assert len(node.children) == 1\n return not eval(node.children[0])\n elif node.id in prefix_operators and len(node.children) == 1:\n value = eval(node.children[0])\n return prefix_operators[node.id](value)\n elif node.id in operators:\n values = [eval(v) for v in node.children]\n return operators[node.id](*values)\n else:\n raise ValueError('unknown node type', node)", "def evaluator(operator: str, value1: str, value2: str) -> str:\n\n evaluation_function: str = value1 + operator + value2\n #Because all three are strings, the + operator simply appends them together to be simplified. \n\n result: str = str(simplify(evaluation_function))\n return result", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def get_symbol(operator):\r\n if isinstance(operator, AST):\r\n operator = type(operator)\r\n try:\r\n return ALL_SYMBOLS[operator]\r\n except KeyError:\r\n raise LookupError('no known symbol for %r' % operator)", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc", "def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper", "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def op(self) -> str:\n return self._node.get(\"op\")", "def do_math(operator, op1, op2):\n if operator == \"*\":\n return op1 * op2\n if operator == \"/\":\n return op1 / op2\n if operator == \"+\":\n return op1 + op2\n if operator == \"-\":\n return op1 - op2\n if operator == \"^\":\n return op1**(op2)", "def convert_unary_op(g, op, block):\n\n # op_map stores mapping relationship between paddlepaddle and relay\n op_map = {\"isinf_v2\": _op.isinf, 
\"isfinite_v2\": _op.isfinite, \"isnan_v2\": _op.isnan}\n if op.type in op_map:\n unary_func = op_map[op.type]\n else:\n # while paddle operator's name is same with relay\n unary_func = get_relay_op(op.type)\n out = unary_func(g.get_node(op.input(\"X\")[0]))\n g.add_node(op.output(\"Out\")[0], out)", "def _remove_operator(self, operator):", "def operator(app):\n return car(app)", "def load_operator(descriptor_operator_bytes: bytes):\n assert len(descriptor_operator_bytes) > 0\n function_desc_bytes, module_name, class_name = gateway_client.deserialize(\n descriptor_operator_bytes\n )\n if function_desc_bytes:\n return create_operator_with_func(function.load_function(function_desc_bytes))\n else:\n assert module_name\n assert class_name\n mod = importlib.import_module(module_name)\n cls = getattr(mod, class_name)\n logger.info(f\"Load cls type {cls}, {class_name} {mod}\")\n from raystreaming.operator import Operator\n\n assert issubclass(cls, Operator)\n return cls()", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def get_node_target(submodules: Mapping[str, torch.nn.Module], node: pippy.fx.Node) -> str:\n\n assert node.op in CALLABLE_NODE_OPS, (\n \"Expect op types of \" + \", \".join(CALLABLE_NODE_OPS) + f\", but found {node.op}\"\n )\n\n if node.op == \"call_module\":\n assert isinstance(node.target, str)\n submod = submodules[node.target]\n submod_type = getattr(submod, \"_base_class_origin\", type(submod))\n return get_acc_ops_name(submod_type)\n elif node.op == \"call_function\":\n target: Any = node.target\n return (\n f\"acc_ops.{target.__name__}\"\n if target.__module__ is not None and \"acc_ops\" in target.__module__\n else _get_qualified_name(target)\n )\n else:\n assert isinstance(node.target, str)\n return node.target", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def _calculate(self, node):\n if isinstance(node, ast.Num): # <number>\n return node.n\n elif isinstance(node, ast.BinOp): # <left> <operator> <right>\n return self._operators[type(node.op)](\n self._calculate(node.left),\n self._calculate(node.right)\n )\n elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1\n return self._operators[type(node.op)](self._calculate(node.operand))\n else:\n raise TypeError(node)", "def _extract_ops_from_onnx_graph(graph, operators, domain_opset_map):\n\n for operator in graph.node:\n # empty domain is used as an alias for 'ai.onnx'\n domain = operator.domain if operator.domain else \"ai.onnx\"\n\n if domain not in operators or domain not in domain_opset_map:\n continue\n\n operators[domain][domain_opset_map[domain]].add(operator.op_type)\n\n for attr in operator.attribute:\n if attr.type == onnx.AttributeProto.GRAPH: # process subgraph\n _extract_ops_from_onnx_graph(attr.g, operators, domain_opset_map)\n elif attr.type == onnx.AttributeProto.GRAPHS:\n # Currently no ONNX operators use GRAPHS.\n # Fail noisily if we encounter this so we can implement support\n raise RuntimeError(\"Unexpected attribute proto of GRAPHS\")", "def ops(rule):\n ops_dict = {'>' : operator.gt,\n '<' : operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=' : operator.eq,\n '==' : operator.eq}\n return ops_dict[rule]", "def singa_op_to_onnx_node(cls, op, op_t):\n optype = cls._get_singa_op_type(op)\n # wether the operator needs special handler\n if optype in cls._special_operators:\n translator = getattr(cls, cls._special_operators[optype])\n else:\n translator = cls._common_singa_tensor_to_onnx_node\n nodes = 
translator(op, op_t)\n if not isinstance(nodes, collections.Iterable):\n nodes = [nodes]\n nodes = [node for node in nodes if node is not None]\n return nodes", "def test_function_statement_at_operator():\n r = convert_code(\"{@foo arg1=bar arg2=3}\")\n assert r == \"{{ {'arg1': bar, 'arg2': 3}|foo }}\"", "def get_func_ast(obj : types.FunctionType):\n return get_ast(obj).body[0]", "def subexpr_to_smtlib(expr, pre, suff='', fun_annotate_subexpr = None):\n if fun_annotate_subexpr is not None and pre in PythonOperators.logic_ops:\n return '(! (' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + \\\n ') :named ' + fun_annotate_subexpr() + ')'\n else:\n return '(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + ')'", "def expr(s):\n if isinstance(s, Expr): return s\n if isnumber(s): return Expr(s)\n ## Replace the alternative spellings of operators with canonical spellings\n s = s.replace('==>', '>>').replace('<==', '<<')\n s = s.replace('<=>', '%').replace('=/=', '^')\n ## Replace a symbol or number, such as 'P' with 'Expr(\"P\")'\n s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr(\"\\1\")', s)\n ## Now eval the string. (A security hole; do not use with an adversary.)\n return eval(s, {'Expr':Expr})", "def RunOperator(op_def):\n RunOperatorCC(_stringify_proto(op_def))", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def convert(tree) :\n kind = tree[0]\n\n if kind == \"dot\" :\n return \"dot\" \n elif kind == \"eol\" :\n return \"eol\"\n elif kind == \"char\" :\n return \"lit('\" + tree[1] + \"')\"\n elif kind == \"set\" :\n return \"oneof('\" + tree[1] + \"')\"\n elif kind == \"elem\" :\n if len(tree) >= 3 :\n return convert(tree[2]) \n else :\n return convert(tree[1])\n elif kind == \"basic\" :\n if len(tree) == 4 :\n return \"alt(\" + convert(tree[1]) + \",\" + convert(tree[3]) + \")\"\n elif len(tree) == 3 :\n return parse_single_op_string(tree[2]) + convert(tree[1]) + \")\"*len(tree[2])\n else :\n return convert(tree[1])\n elif kind == \"RE\" :\n if len(tree) == 3 and tree[2][1][0] != 'eol' :\n return \"seq(\" + convert(tree[1]) + \",\" + convert(tree[2]) + \")\"\n else :\n return convert(tree[1])\n else :\n print \"invalid node tag : {}\".format(kind)", "def _get_np_op(name):\n for mod in _ONP_OP_MODULES:\n op = getattr(mod, name, None)\n if op is not None:\n return op\n raise ValueError('Operator `{}` is not supported by `mxnet.numpy`.'.format(name))", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. 
\"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)", "def find_label_operator(query):\n # If you apply any changes into these regex patterns, please update the JSON schema consequently at:\n # depc/schemas/v1_config.json\n # Rule\n regex = r\"^rule.(.+|'.+')$\"\n match = re.search(regex, query)\n if match:\n rule = match.group(1)\n if rule.startswith(\"'\"):\n rule = rule[1:-1]\n return RuleOperator, {\"rule\": rule}\n\n # Operation AND, OR (no argument)\n regex = (\n r\"^operation.(AND|OR)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n )\n match = re.search(regex, query)\n if match:\n # Transform '[Foo, Bar]' into a Python list\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation ATLEAST (integer argument)\n regex = r\"^operation.(ATLEAST\\([0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation RATIO (float integer less than 0)\n regex = r\"^operation.(RATIO\\(0.[0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Aggregation AVERAGE, MIN, MAX\n regex = r\"^aggregation.(AVERAGE|MIN|MAX)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return AggregationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # We validate the schema before save it in database,\n # it's not possible to go here.\n return None, None", "def operator_to_vector(op):\n if op.type in ['super', 'operator-ket', 'operator-bra']:\n raise TypeError(\"Cannot convert object already \"\n \"in super representation\")\n return Qobj(stack_columns(op.data),\n dims=[op.dims, [1]],\n type='operator-ket',\n superrep=\"super\",\n copy=False)", "def nn_to_rpn(self, nn):\n expression = []\n ops = []\n\n # handle +-*/) to add a space before and after the operator\n nn = nn.strip()\n nn = re.sub(r\"(?P<operator>[+\\-*/])\", add_spaces_operator, nn)\n # handle the wrongly replaced \" * * \"(maybe many spaces around *) to \"**\"\n nn = re.sub(r\" *\\* {2}\\* *\", \"**\", nn)\n nn = re.sub(r\"(?P<operator>[(])\", add_spaces_left_bracket, nn)\n nn = re.sub(r\"(?P<operator>[)])\", add_spaces_right_bracket, nn)\n items = re.split(r\"\\s+\", nn)\n for item in items:\n if item in [\"+\", \"-\", \"*\", \"/\"]:\n while len(ops) >= 0:\n if len(ops) == 0:\n ops.append(item)\n break\n op = ops.pop()\n if op == \"(\" or self.ops_rule[item] > self.ops_rule[op]:\n ops.append(op)\n ops.append(item)\n break\n else:\n expression.append(op)\n elif item == \"(\":\n ops.append(item)\n elif item == \")\":\n while len(ops) > 0:\n op = ops.pop()\n if op == \"(\":\n break\n else:\n expression.append(op)\n else:\n expression.append(item)\n\n while len(ops) > 0:\n expression.append(ops.pop())\n\n return expression", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This 
can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)", "def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n \"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # 
the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def replace_operators_by_calls(topconstruct, opname, call, call_id_construct):\n # find all computations\n for computation in query([is_computation], TreeItem(topconstruct)):\n replace_op_by_call(computation.construct, opname, call, call_id_construct)", "def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)", "def get_binary_op_str(bin_op_node):\n\n if isinstance(bin_op_node, ast.Add):\n return \"+\"\n\n elif isinstance(bin_op_node, ast.Sub):\n return \"-\"\n\n elif isinstance(bin_op_node, ast.Mult):\n return \"*\"\n\n elif isinstance(bin_op_node, ast.Div):\n return \"/\"\n\n elif isinstance(bin_op_node, ast.Mod):\n return \"%\"\n\n elif isinstance(bin_op_node, ast.Pow):\n return \"**\"\n\n elif isinstance(bin_op_node, ast.LShift):\n return \"<<\"\n\n elif isinstance(bin_op_node, ast.RShift):\n return \">>\"\n\n else:\n raise ValueError(\"No string defined for binary operator node %s\" % \\\n bin_op_node.__class__.__name__)", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def __init__(self, opToken, leftOper, rightOper):\n self.operator = opToken\n self.leftOperand = leftOper\n self.rightOperand = rightOper", "def get_operator(key):\n # Check for simple operators\n if key.startswith('re_'):\n operator = np.real\n newkey = key[3:]\n elif key.startswith('im_'):\n operator = np.imag\n newkey = key[3:]\n elif key.startswith('abs_'):\n operator = np.abs\n newkey = key[4:] \n else:\n operator = None \n newkey = key\n \n return operator, newkey", "def rename(op_name):\n return type(op_name, (OpConverter,), {})", "def _basic_operators_init():\n global BASIC_OPERATORS\n\n BASIC_OPERATORS = {\n \"angle_between\": {\n \"node\": \"angleBetween\",\n \"inputs\": [\n [\"vector1X\", \"vector1Y\", \"vector1Z\"],\n [\"vector2X\", \"vector2Y\", \"vector2Z\"],\n ],\n \"outputs\": [\n [\"angle\"],\n ],\n },\n\n \"average\": {\n \"node\": 
\"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": 3,\n },\n\n \"blend\": {\n \"node\": \"blendColors\",\n \"inputs\": [\n [\"color1R\", \"color1G\", \"color1B\"],\n [\"color2R\", \"color2G\", \"color2B\"],\n [\"blender\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"choice\": {\n \"node\": \"choice\",\n \"inputs\": [\n [\"input[{array}]\"],\n [\"selector\"],\n ],\n \"outputs\": [\n [\"output\"],\n ],\n },\n\n \"clamp\": {\n \"node\": \"clamp\",\n \"inputs\": [\n [\"inputR\", \"inputG\", \"inputB\"],\n [\"minR\", \"minG\", \"minB\"],\n [\"maxR\", \"maxG\", \"maxB\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"compose_matrix\": {\n \"node\": \"composeMatrix\",\n \"inputs\": [\n [\"inputTranslateX\", \"inputTranslateY\", \"inputTranslateZ\"],\n [\"inputRotateX\", \"inputRotateY\", \"inputRotateZ\"],\n [\"inputScaleX\", \"inputScaleY\", \"inputScaleZ\"],\n [\"inputShearX\", \"inputShearY\", \"inputShearZ\"],\n [\"inputRotateOrder\"],\n [\"useEulerRotation\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"decompose_matrix\": {\n \"node\": \"decomposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputTranslateX\", \"outputTranslateY\", \"outputTranslateZ\"],\n [\"outputRotateX\", \"outputRotateY\", \"outputRotateZ\"],\n [\"outputScaleX\", \"outputScaleY\", \"outputScaleZ\"],\n [\"outputShearX\", \"outputShearY\", \"outputShearZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"inverse_matrix\": {\n \"node\": \"inverseMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"length\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"point1X\", \"point1Y\", \"point1Z\"],\n [\"point2X\", \"point2Y\", \"point2Z\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"matrix_distance\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"inMatrix1\"],\n [\"inMatrix2\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"mult_matrix\": {\n \"node\": \"multMatrix\",\n \"inputs\": [\n [\n \"matrixIn[{array}]\"\n ],\n ],\n \"outputs\": [\n [\"matrixSum\"],\n ],\n },\n\n \"normalize_vector\": {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": 0,\n },\n\n \"pair_blend\": {\n \"node\": \"pairBlend\",\n \"inputs\": [\n [\"inTranslateX1\", \"inTranslateY1\", \"inTranslateZ1\"],\n [\"inRotateX1\", \"inRotateY1\", \"inRotateZ1\"],\n [\"inTranslateX2\", \"inTranslateY2\", \"inTranslateZ2\"],\n [\"inRotateX2\", \"inRotateY2\", \"inRotateZ2\"],\n [\"weight\"],\n [\"rotInterpolation\"],\n ],\n \"outputs\": [\n [\"outTranslateX\", \"outTranslateY\", \"outTranslateZ\"],\n [\"outRotateX\", \"outRotateY\", \"outRotateZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"point_matrix_mult\": {\n \"node\": \"pointMatrixMult\",\n \"inputs\": [\n [\"inPointX\", \"inPointY\", \"inPointZ\"],\n [\"inMatrix\"],\n [\"vectorMultiply\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n },\n\n \"remap_value\": {\n \"node\": \"remapValue\",\n \"inputs\": [\n [\"inputValue\"],\n [\"outputMin\"],\n [\"outputMax\"],\n [\"inputMin\"],\n [\"inputMax\"],\n ],\n \"outputs\": [\n 
[\"outValue\"],\n ],\n },\n\n \"set_range\": {\n \"node\": \"setRange\",\n \"inputs\": [\n [\"valueX\", \"valueY\", \"valueZ\"],\n [\"minX\", \"minY\", \"minZ\"],\n [\"maxX\", \"maxY\", \"maxZ\"],\n [\"oldMinX\", \"oldMinY\", \"oldMinZ\"],\n [\"oldMaxX\", \"oldMaxY\", \"oldMaxZ\"],\n ],\n \"outputs\": [\n [\"outValueX\", \"outValueY\", \"outValueZ\"],\n ],\n },\n\n \"transpose_matrix\": {\n \"node\": \"transposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n }\n\n # Fill BASIC_OPERATORS with condition operations\n cond_operators = [\"eq\", \"ne\", \"gt\", \"ge\", \"lt\", \"le\"]\n for i, condition_operator in enumerate(cond_operators):\n BASIC_OPERATORS[condition_operator] = {\n \"node\": \"condition\",\n \"inputs\": [\n [\"firstTerm\"],\n [\"secondTerm\"],\n ],\n # The condition node is a special case! It gets created during\n # the magic-method-comparison and fully connected after being\n # passed on to the condition()-method in this OperatorMetaClass\n \"outputs\": [\n [None],\n ],\n \"operation\": i,\n }\n\n # Fill BASIC_OPERATORS with +,- operations\n for i, add_sub_operator in enumerate([\"add\", \"sub\"]):\n BASIC_OPERATORS[add_sub_operator] = {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with *,/,** operations\n for i, mult_div_operator in enumerate([\"mul\", \"div\", \"pow\"]):\n BASIC_OPERATORS[mult_div_operator] = {\n \"node\": \"multiplyDivide\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with vectorProduct operations\n for i, vector_product_operator in enumerate([\"dot\", \"cross\"]):\n BASIC_OPERATORS[vector_product_operator] = {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def from_name(self, name):\n return self._name_to_operator.get(name.lower())", "def __init__(self):\n self.operators_dict = {\n '+': {'function': lambda x=0, y=0: x + y, 'priority': 4},\n '-': {'function': lambda x=0, y=0: x - y, 'priority': 4},\n '*': {'function': lambda x, y: x * y, 'priority': 3},\n '/': {'function': lambda x, y: Error(id=8, arg='/') if y == 0 else x/y, 'priority': 3},\n '%': {'function': lambda x, y: x % y, 'priority': 3},\n '//': {'function': lambda x, y: x // y, 'priority': 3},\n '^': {'function': lambda x, y: Error(id=7, arg='^') if x < 0 and isinstance(y, float) else x ** y, 'priority': 1},\n '==': {'function': lambda x, y: x == y, 'priority': 9},\n '!=': {'function': lambda x, y: x != y, 'priority': 9},\n '>': {'function': lambda x, y: x > y, 'priority': 9},\n '<': {'function': lambda x, y: x < y, 'priority': 9},\n '>=': {'function': lambda x, y: x >= y, 'priority': 9},\n '<=': {'function': lambda x, y: x <= y, 'priority': 9},\n }", "def getOperatorName(self):\n return _libsbml.ASTNode_getOperatorName(self)", "def gen_node_script(env: jinja2.environment.Environment, graph: onnx.GraphProto, node: 
onnx.NodeProto) \\\n -> operator_gen.GeneratedScriptPart:\n try:\n return operator_generators[node.op_type](env, graph, node)\n except KeyError as error:\n print(\"Operator \" + str(node.op_type) + \" not supported\")\n raise error", "def do_oprn(self, *args, operator=None, **kwargs):\n\t\tself.operator = operator\n\n\t\tif not self.operator:\n\t\t\treturn f'No operator provided'\n\n\t\tif self.operator == '+':\n\t\t\treturn self.sum(*args, **kwargs)\n\t\telif self.operator == '-':\n\t\t\treturn self.subtract(*args, **kwargs)\n\t\telif self.operator == '*':\n\t\t\treturn self.multiple(*args, **kwargs)\n\t\telif self.operator == '/':\n\t\t\treturn self.division(*args, **kwargs)\n\t\telse:\n\t\t\treturn f'Currently Operator ({operator}) is not Applicable'", "def special_math_func(state, other, operator):\n if not hasattr(other, '__iter__'):\n # other is just a number\n results = [getattr(state[each], operator)(other)\n for each in state.keys()]\n else:\n try:\n # Both are dictionaries\n results = [getattr(state[each], operator)(other[each])\n for each in state]\n except IndexError:\n # Both are iterables, but other is not a dictionary\n results = [getattr(state[i], operator)(j)\n for i, j in zip(state, other)]\n out = State(zip(state.keys(), results))\n return out", "def fun(op, v1, v2):\n if op == '+':\n return v1+v2\n elif op == '-':\n return v1-v2\n elif op == '*':\n return v1*v2\n elif op == '/':\n return v1", "def _(self, node: BinaryOp):\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n return f\"( {node.op} {left} {right} )\"", "def apply_math_operations(operand1, operand2, operator):\n\t\tlogger.info(\"in the apply math\")\n\t\tif operator == \"+\":\n\t\t\tresult = operand1 + operand2\n\t\t\treturn result\n\n\t\telif operator == \"-\":\n\t\t\tresult = operand1 - operand2\n\t\t\treturn result\n\n\t\telif operator == \"*\":\n\t\t\tresult = operand1 * operand2\n\t\t\treturn result\n\n\t\telif operator == \"/\":\n\t\t\tresult = operand1 / operand2\n\t\t\treturn result\n\t\telse:\n\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\traise Exception(\"Not a valid operator\")", "def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15", "def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n return 
result\n self.raise_error()", "def math_operation(expression):\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n # eliminates the error call for float and negative numbers\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit() or \\\n not str(expression[2]).replace('.', '1').replace('-', '1').isdigit():\n raise ValueError(f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0]**expression[2]\n elif operator == '*':\n return expression[0]*expression[2]\n elif operator == '/':\n return expression[0]/expression[2]\n elif operator == '+':\n return expression[0]+expression[2]\n elif operator == '-':\n return expression[0]-expression[2]", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)" ]
[ "0.7403406", "0.68868965", "0.670449", "0.6681713", "0.65929717", "0.65538996", "0.6334976", "0.6330674", "0.63050497", "0.62751913", "0.59945136", "0.59220994", "0.58847594", "0.582918", "0.58211267", "0.5793635", "0.579274", "0.57560617", "0.56892043", "0.5672466", "0.56405747", "0.5610882", "0.56029516", "0.559694", "0.5591294", "0.558444", "0.5562065", "0.5551155", "0.5549541", "0.55332077", "0.5526717", "0.54953724", "0.5490179", "0.5467821", "0.54557407", "0.54470134", "0.5444345", "0.54400986", "0.5439515", "0.5412687", "0.54043055", "0.54027665", "0.5398757", "0.539287", "0.5386136", "0.5372552", "0.5363466", "0.5354154", "0.5353227", "0.5337376", "0.5319679", "0.5308098", "0.5303976", "0.53020406", "0.52877456", "0.5287031", "0.52839136", "0.5271615", "0.526433", "0.5262349", "0.5249776", "0.524124", "0.5239536", "0.52374965", "0.52347326", "0.52342457", "0.52270716", "0.52269936", "0.52200663", "0.5218901", "0.5217659", "0.5209401", "0.5203245", "0.5194862", "0.51937276", "0.5173491", "0.51685846", "0.51627964", "0.5157473", "0.51557577", "0.51499325", "0.5145582", "0.51444983", "0.5143105", "0.51421756", "0.5131915", "0.51316285", "0.5131017", "0.51244384", "0.5115957", "0.5114265", "0.5114209", "0.5113397", "0.51104724", "0.51092315", "0.5106267", "0.5103437", "0.5098603", "0.50943494", "0.5091587" ]
0.7269648
1
Convert augmented assignment to assignment plus function call.
def visit_AugAssign(self, node): # FIXME: Gensym the LHS to avoid two evaluations. self.generic_visit(node) rhs = to_call(self.op_to_function(node.op), [set_ctx(node.target), node.value]) return ast.Assign([node.target], rhs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)", "def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)", "def expand_callable(self, call_expr):\n call_expr.func = ast.Attribute(value=call_expr.func, attr='__call__')", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def _assign_op(dest, op, arg, val, path, scope):\n if op == '[':\n dest[arg] = val\n elif op == '.':\n setattr(dest, arg, val)\n elif op == 'P':\n _assign = scope[TargetRegistry].get_handler('assign', dest)\n try:\n _assign(dest, arg, val)\n except Exception as e:\n raise PathAssignError(e, path, arg)\n else: # pragma: no cover\n raise ValueError('unsupported T operation for assignment')", "def mk_assign(var_map, s, assigns):\n assign_args = []\n for k, v in assigns.items():\n k2 = convert_term(var_map, s, k)\n assert k2.fun == s, \"mk_assign: key is not an identifer.\"\n assign_args.append(k2.arg)\n assign_args.append(convert_term(var_map, s, v))\n\n return function.mk_fun_upd(s, *assign_args)", "def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)", "def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, 
rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n 
# temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)", "def assign(self, *args):\n return _ida_hexrays.cexpr_t_assign(self, *args)", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def copy_stmt(self, 
env, dst_marking_var, src_marking_var):\n field = self.field\n return pyast.E(\"{} = {}\".format(field.access_from(dst_marking_var), field.access_from(src_marking_var)))", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def __rrshift__(self, other):\n if isinstance(other, Callable):\n return self @ other\n else:\n return self(other) # Function application", "def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def expand_as(predicate_string): \n def callback(frame, name, func, old_locals):\n from peak.rules.predicates import _expand_as\n kind, module, locals_, globals_ = core.frameinfo(frame)\n return _expand_as(\n func, predicate_string, locals_, globals_, __builtins__\n )\n return core.decorate_assignment(callback)", "def assign(self, *args):\n return _libsbml.string_assign(self, *args)", "def _(self, node: Assignment):\n\n # This check allows us to ignore the initialization nodes\n # in the CAST 'i.e. 
x0 = -1'\n if node.source_refs == None:\n if type(node.left) == Var:\n if type(node.right) == Number and node.right.number == -1:\n return \"\"\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n to_ret = f\"( assign {left} {right} )\"\n return to_ret", "def special_setitem(self, form):\n obj = self.reallyCompile(form[1])\n key = self.reallyCompile(form[2])\n value = self.reallyCompile(form[3])\n return ast.Assign([ast.Subscript(obj,\n 'OP_ASSIGN',\n [key])],\n value)", "def _compat_assign_gast_4(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value)", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts", "def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def __call__(self, a):\n self.x = a\n return self.forward(a)", "def to_op(self):\n raise NotImplementedError", "def on(argument_name):\n\n def callback(frm,name,value,old_locals):\n return _mkGeneric(value,argument_name)\n\n from dispatch.functions import _mkGeneric\n from peak.util.decorators import decorate_assignment\n return decorate_assignment(callback)", "def store_and(callable):\n\n class store_and_action(argparse.Action):\n def __call__(self, *args, **kwargs):\n callable.__call__(args[2])\n setattr(args[1], self.dest, args[2])\n\n return store_and_action", "def decorate_assignment(callback, depth=2, frame=None):\n frame = enclosing_frame(frame, depth+1)\n oldtrace = [frame.f_trace]\n old_locals = frame.f_locals.copy()\n\n def tracer(frm, event, arg):\n if event == 'call':\n # We don't want to trace into any calls\n if oldtrace[0]:\n # ...but give the previous tracer a chance to, if it wants\n return oldtrace[0](frm, event, arg)\n else:\n return None\n\n try:\n if frm is frame and event != 'exception':\n # Aha, time to check for an assignment...\n for k, v in frm.f_locals.items():\n if k not in old_locals or old_locals[k] is not v:\n break\n else:\n # No luck, keep tracing\n return tracer\n\n # Got it, fire the callback, then get the heck outta here...\n frm.f_locals[k] = callback(frm, k, v, old_locals)\n\n finally:\n # Give the previous tracer a chance to run before we return\n if 
oldtrace[0]:\n # And allow it to replace our idea of the \"previous\" tracer\n oldtrace[0] = oldtrace[0](frm, event, arg)\n\n uninstall()\n return oldtrace[0]\n\n def uninstall():\n # Unlink ourselves from the trace chain.\n frame.f_trace = oldtrace[0]\n sys.settrace(oldtrace[0])\n\n # Install the trace function\n frame.f_trace = tracer\n sys.settrace(tracer)\n\n def do_decorate(f):\n # Python 2.4 '@' compatibility; call the callback\n uninstall()\n frame = sys._getframe(1)\n return callback(\n frame, getattr(f, '__name__', None), f, frame.f_locals\n )\n\n return do_decorate", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def test_augassign_recursion():\n # infinitely recurses in python\n code = \"\"\"\n def rec():\n a = 0\n a += rec()\n return a\n rec()\n \"\"\"\n cls_node = extract_node(code)\n assert next(cls_node.infer()) is util.Uninferable", "def compile_update(py_ast, filename):\n code = compile(py_ast, filename, mode='eval')\n code = update_firstlineno(code, py_ast.lineno)\n bp_code = Code.from_code(code)\n replace_global_loads(bp_code.code)\n optimize_locals(bp_code.code)\n bp_code.code = inject_inversion(bp_code.code)\n bp_code.newlocals = False\n bp_code.args = ('_[inverter]', '_[value]') + bp_code.args\n return bp_code.to_code()", "def eval(*args, **kwargs):\n\n pass", "def transform(self, func):\n return func(self)", "def replaces(func: Callable[..., Tuple[str]], name: str):\n Replacements._rep[name] = func\n return func", "def replaces_operator(func: Callable[[Any, Any, str, str], Tuple[str]],\n classname: str,\n optype: str,\n otherclass: str = None):\n if otherclass is None:\n otherclass = classname\n Replacements._oprep[(classname, otherclass, optype)] = func\n return func", "def __call__(self, function: FuncStrArg):\n self._add_attr(function)\n return function", "def test_001_lambda_assign(self):\r\n text = \"\"\"\r\n const f = (d,k,v) => d[k] = v\r\n \"\"\"\r\n tokens = Lexer().lex(text)\r\n ast = Parser().parse(tokens)\r\n expected = TOKEN('T_MODULE', '',\r\n TOKEN('T_VAR', 'const',\r\n TOKEN('T_ASSIGN', '=',\r\n TOKEN('T_TEXT', 'f'),\r\n TOKEN('T_LAMBDA', '=>',\r\n TOKEN('T_TEXT', 'Anonymous'),\r\n TOKEN('T_ARGLIST', '()',\r\n TOKEN('T_TEXT', 'd'),\r\n TOKEN('T_TEXT', 'k'),\r\n TOKEN('T_TEXT', 'v')\r\n ),\r\n TOKEN('T_ASSIGN', '=',\r\n TOKEN('T_SUBSCR', '',\r\n TOKEN('T_TEXT', 'd'),\r\n TOKEN('T_TEXT', 'k'))\r\n ),\r\n TOKEN('T_TEXT', 'v')\r\n )\r\n )\r\n )\r\n )\r\n\r\n self.assertFalse(parsecmp(expected, ast, False))", "def val_at(self, *args, **kwargs):\n self.add_pc(1)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(self.ss_i(0), \"@\"), _m(self.ss_i(0))", "def process_call_byref_assign(topconstruct):\n for topcalls in query([is_layering([syntax.CALL, syntax.ASSIGNMENT, syntax.PROGRAM])], TreeItem(topconstruct)):\n assignment = topcalls.parent_item\n #c = topcalls.construct\n # -- check the args of this call: do them contain a reference\n # we need to find all the\n refs = query([is_layering([syntax.REFERENCE, syntax.CALL, syntax.ASSIGNMENT, syntax.PROGRAM])], topcalls)\n if len(refs) > 0:\n var_names = list(map(lambda r: r.construct.args[0].args[0], refs))\n var_names.insert(0, assignment.construct.args[0])\n res_tuple = syntax.Construct(syntax.PY_TUPLE, var_names)\n # here we need to create a tuple\n assignment.construct.args[0] = res_tuple", "def __call__(value):", "def assign(self, dst, req, src):\n if req == 'null':\n return\n if req in ('write', 'inplace'):\n dst[:] = src\n 
elif req == 'add':\n dst[:] += src", "def SBMLTransforms_expandInitialAssignments(*args):\n return _libsbml.SBMLTransforms_expandInitialAssignments(*args)", "def eval(self, *args, **kwargs):\n raise NotImplementedError", "def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"assign: \", tok )\n\tif re.match( Lexer.identifier, tok ):\n\t\tident = VarRef( tok )\n\telse: \n\t\terror( \"Invalid identifier\" )\n\ttok = tokens.next( )\n\tequals = match( \"=\" )\n\ttok = tokens.peek( )\n\texpr = expression( )\n\tmatch( \";\" )\n\tequals = VarRef( equals )\n\tstatement = assign( equals, ident, expr )\n\treturn statement", "def gen_apply(self, g, ng, node):\n with About(node.debug, self.relation):\n if node is g.output:\n new_node = ng.add_parameter()\n else:\n new_node = ng.apply()\n # NOTE: First parameter to remap_node is (g, node) instead of just\n # node. This lets us dispatch to a different node depending on whether\n # it belongs to the graph that uses it, or is a free variable.\n self.remap_node((g, node), g, node, ng, new_node)", "def to_code(self, ipt_args_in_construct: str, variable_name: str, output_var: str, code_fragment):", "def create_Assign(left_hand_side, right_hand_side):\n right_hand_side.ctx = ast.Load()\n left_hand_side.ctx = ast.Store()\n return ast.Assign(targets=[left_hand_side], value=right_hand_side)", "def _eval_subs(self, old, new):\n return None", "def assign_from_values_fn(var_names_to_values):\n assign_op, feed_dict = assign_from_values(var_names_to_values)\n def callback(session):\n return session.run(assign_op, feed_dict)\n return callback", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def _compat_assign_gast_5(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value, type_comment=type_comment)", "def __call__(a, b):", "def hook_local_eval(bridge_conn, eval_expr, eval_globals, eval_locals):\n\n # first, bind the eval function to the arguments\n prepped_function = functools.partial(eval, eval_expr, eval_globals, eval_locals)\n\n return call_execute_sync_and_get_result(prepped_function)", "def get_assignments(function_node, object_name):\n\n # This only supports simple assignments such as \"name.attr = value\" or \"name[index] = value\". 
Other\n # assignments will either throw an exception or not return the correct thing.\n # This code could be modified to allow for more robust statements but I kept it simple because the\n # code should already be formatted for these conditions.\n assignments = []\n for node in ast.walk(function_node):\n if isinstance(node, ast.Assign):\n assign = node\n if isinstance(assign.targets[0], ast.Subscript):\n subscript = assign.targets[0]\n if isinstance(subscript.value, ast.Name):\n name = subscript.value.id # This is the ast.Name related to the object_name\n if name == object_name:\n subscript_value = convert_literal_node(subscript.slice.value)\n value = convert_literal_node(assign.value)\n new_assignment = SubscriptAssignment(object_name=object_name, subscript=subscript_value, value=value)\n assignments.append(new_assignment)\n elif isinstance(assign.targets[0], ast.Attribute):\n attribute = assign.targets[0]\n if isinstance(attribute.value, ast.Name):\n name = attribute.value.id # This is the ast.Name related to the object_name\n if name == object_name:\n attribute_name = attribute.attr\n attribute_value = convert_literal_node(assign.value)\n new_assignment = AttributeAssignment(object_name=object_name, attribute=attribute_name, value=attribute_value)\n assignments.append(new_assignment)\n return assignments", "def _get_intermediate_simp(deffunc=lambda x: x, offfunc=lambda x: x,\n onfunc=_dotprodsimp, dotprodsimp=None):\n\n if dotprodsimp is False or _dotprodsimp_state.state is False:\n return offfunc\n if dotprodsimp is True or _dotprodsimp_state.state is True:\n return onfunc\n\n return deffunc # None, None", "def _arg_swapper(op):\n\n def op_swapped(a, b, *args, **kwargs):\n return op(b, a, *args, **kwargs)\n\n return op_swapped", "def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()", "def infer(self):\n self.eval()", "def dispatch_as(*decorators):\n\n if len(decorators)>1:\n decorators = list(decorators)\n decorators.reverse()\n\n def callback(frame,k,v,old_locals):\n for d in decorators:\n v = d(v)\n return v\n\n from peak.util.decorators import decorate_assignment\n return decorate_assignment(callback)", "def eval(self, A):\n\t\tpass", "def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with different arities, e.g.,\n # `x,y = a,b,c = ...` is not a syntax error in Python, though it\n # probably should be because it's guaranteed to cause a runtime\n # error. 
Raise the error here, since we cannot proceed.\n raise SyntaxError(\"Multiple assignment with different arities\")\n nvalues = nvalues.pop()\n\n # Assign temporary variables.\n temps = [ gensym() for i in range(nvalues) ]\n stmts = []\n if is_sequence_node(node.value) and len(node.value.elts) == nvalues:\n # Special case: RHS is sequence literal of correct length.\n for i in range(nvalues):\n temp_target = to_name(temps[i], ast.Store())\n stmts.append(ast.Assign([temp_target], node.value.elts[i]))\n else:\n # General case.\n temp_target = to_tuple(\n (to_name(temp, ast.Store()) for temp in temps), ast.Store())\n stmts.append(ast.Assign([temp_target], node.value))\n\n # Rewrite assignments as sequence of assignments.\n for target in reversed(node.targets):\n if is_sequence_node(target):\n stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))\n for i in range(nvalues))\n else:\n temp_tuple = to_tuple(to_name(temp) for temp in temps)\n stmts.append(ast.Assign([target], temp_tuple))\n \n return stmts", "def assign(self, *args):\n return _ida_hexrays.cif_t_assign(self, *args)", "def eval(*args, **kwargs)->Any:\n pass", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)" ]
[ "0.6730379", "0.60008764", "0.59139097", "0.5711847", "0.56486744", "0.56235904", "0.558098", "0.5525872", "0.5452114", "0.5416577", "0.5416467", "0.53805333", "0.53741395", "0.53737843", "0.53596747", "0.5325352", "0.5319974", "0.5307721", "0.52776676", "0.5269577", "0.52540934", "0.5252053", "0.5249622", "0.52414185", "0.5238813", "0.5232454", "0.517946", "0.5175198", "0.51656896", "0.5159953", "0.5154017", "0.51002884", "0.5086747", "0.5073234", "0.5045694", "0.5019232", "0.50144404", "0.4981941", "0.49769476", "0.49733734", "0.49705222", "0.49682513", "0.49662715", "0.49643147", "0.49522936", "0.494866", "0.49459097", "0.4940804", "0.4933052", "0.49267647", "0.49197882", "0.48960716", "0.4888613", "0.48606953", "0.48490667", "0.48482317", "0.4841442", "0.4838292", "0.48382747", "0.483659", "0.4829242", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.48207438", "0.4820709", "0.4820709", "0.4820709", "0.4820709", "0.48176223", "0.4804491", "0.4795186", "0.47888538", "0.4785796", "0.4783103", "0.47829014", "0.47704196", "0.47616512", "0.4759803", "0.47586197", "0.4758543", "0.47535783", "0.47408476", "0.47084174", "0.47084174" ]
0.61607265
1
Convert AST operator to function in operator module.
def op_to_function(self, op):
    name = op.__class__.__name__.lower()
    name = operator_table.get(name, name)
    return to_attribute(self.operator, name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def to_operator(operator):\n if isinstance(operator, str):\n return ValueConstraintOperators.STRING_OPERATOR_MAP[operator]\n else:\n return operator", "def convert(self, operator: OperatorBase) -> OperatorBase:\n # pylint: disable=cyclic-import,import-outside-toplevel\n from ..evolutions.evolved_op import EvolvedOp\n\n if isinstance(operator, ListOp):\n if isinstance(operator, SummedOp) and all([isinstance(op, PauliOp)\n for op in operator.oplist]):\n # For now, we only support graphs over Paulis.\n return self.group_subops(operator)\n elif self._traverse:\n return operator.traverse(self.convert)\n else:\n return operator\n elif isinstance(operator, OperatorStateFn) and self._traverse:\n return OperatorStateFn(self.convert(operator.primitive),\n is_measurement=operator.is_measurement,\n coeff=operator.coeff)\n elif isinstance(operator, EvolvedOp) and self._traverse:\n return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff)\n else:\n return operator", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def to_op(self):\n raise NotImplementedError", "def run_operator(scope_node, node, name, op, code, f_globals):\n operators = __get_operators()\n if op not in operators:\n raise TypeError(\"failed to load operator '%s'\" % op)\n scope_key = scope_node.scope_key\n pair = operators[op](code, scope_key, 
f_globals)\n if isinstance(name, tuple):\n # The template inst binding with a single name will take this\n # path by using a length-1 name tuple. See bug #78.\n bind_extended_member(node, name, pair, scope_key)\n else:\n item = getattr(node.klass, name, None)\n if isinstance(item, Alias):\n bind_aliased_member(node, name, item, pair, scope_key)\n else:\n # This is the path for a standard binding on a child def.\n # It does not need the closure scope key. See bug #78.\n bind_member(node, name, pair)", "def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op", "def rhs_as_python_func(self, namespace=None):\n namespace = namespace or {}\n\n return eval(\"lambda %s: %s\" % (','.join(self.rhs_names), self.rhs),\n str_to_npfunc_map, namespace)\n # math_namespace.namespace, namespace)", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")", "def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast", "def get_fermion_operator(operator):\n fermion_operator = FermionOperator()\n\n if isinstance(operator, PolynomialTensor):\n for term in operator:\n fermion_operator += FermionOperator(term, operator[term])\n return fermion_operator\n\n raise TypeError(\"Unsupported type of oeprator {}\".format(operator))", "def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in 
cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)", "def translate(expr):\n return from_python(ast.parse(expr))", "def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))", "def convert_exp(node, **kwargs):\n return create_basic_op_node('Exp', node, kwargs)", "def apply(expr, fun_annotate_subexpr = None):\n assert isinstance(expr, Expression)\n t = type(expr)\n if t is Op:\n try:\n pre, suff = ExprTranslator.OPS_TO_SMTLIB[expr.id]\n return ExprTranslator.subexpr_to_smtlib(expr, pre, suff, fun_annotate_subexpr)\n except KeyError:\n raise Exception(str(expr.id) + ': operation not supported!')\n\n elif t is Var:\n return expr.get_text()\n elif t is ConstInt or t is ConstBool or t is ConstReal:\n return str(expr.get_text())\n elif t is ExprHole:\n return expr.hole_decl.get_function_call()\n else:\n raise Exception(str(t)+': expression type not supported!')", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)", "def funcOpExchange(expstr):\n funcOpDict = expr.getFuncOpDict() \n for funcstr in funcOpDict:\n idx = expstr.find(funcstr)\n if idx >= 0:\n #if we find a function string at idx\n if (idx == 0 or not expstr[idx-1].isalpha()) and expstr[idx+len(funcstr)] == '(':\n fstart = idx\n fstop = 0\n rest = expstr[idx:]\n pdepth = 0\n for i,c in enumerate(rest):\n if c == '(':\n pdepth += 1\n if c == ')':\n pdepth -= 1\n if pdepth == 0:\n fstop = idx+i+1\n break\n start = expstr[:fstart]\n middle = expstr[fstart:fstop]\n end = expstr[fstop:]\n args = ['('+funcOpExchange(exp)+')' for exp in funcargs(middle)]\n if len(args) == 1:\n args.append('0')\n expstr = start+funcOpDict[funcstr].join(args)+funcOpExchange(end)\n return expstr", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif 
expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, 
NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors", "def convert(ast):\n\n if ast and ast.type == \"Function\":\n # Activity function conversion\n if (\n ast.name != \"molecularActivity\"\n and ast.name in belspec[\"namespaces\"][\"Activity\"][\"list\"]\n ):\n print(\"name\", ast.name, \"type\", ast.type)\n ast = convert_activity(ast)\n return ast # Otherwise - this will trigger on the BEL2 molecularActivity\n\n # translocation conversion\n elif ast.name in [\"tloc\", \"translocation\"]:\n ast = convert_tloc(ast)\n\n fus_flag = False\n for idx, arg in enumerate(ast.args):\n if arg.__class__.__name__ == \"Function\":\n\n # Fix substitution -> variation()\n if arg.name in [\"sub\", \"substitution\"]:\n ast.args[idx] = convert_sub(arg)\n\n elif arg.name in [\"trunc\", \"truncation\"]:\n ast.args[idx] = convert_trunc(arg)\n\n elif arg.name in [\"pmod\", \"proteinModification\"]:\n ast.args[idx] = convert_pmod(arg)\n\n elif arg.name in [\"fus\", \"fusion\"]:\n fus_flag = True\n\n # Recursively process Functions\n ast.args[idx] = convert(ast.args[idx])\n\n if fus_flag:\n ast = convert_fus(ast)\n\n return ast", "def opsplit(expstr):\n\n #ops are the one char operators (sorted on precidence)\n ops = expr.getOps()\n #Remove outer parentesis if we have them\n if expstr[0] == '(' and expstr[-1] == ')' and balanced(expstr[1:-1]):\n expstr = expstr[1:-1]\n #Add a '0' to the beginning of the string if we start with an operator\n if expstr[0] in ops:\n expstr = '0'+expstr\n for op in ops:\n pc = 0\n cc = len(expstr)-1\n revexpstr = list(expstr)\n revexpstr.reverse()\n #Search for the operator backwards (to preserve operator presidence)\n for c in revexpstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if c == op and pc == 0:\n #Build the tree recursively\n return [op,opsplit(expstr[:cc]),opsplit(expstr[cc+1:])]\n cc -=1\n #if we find something that looks like a function, parse it separately \n if funcpattern(expstr):\n fnamestr = funcname(expstr)\n fargs = funcargs(expstr)\n farglist = [opsplit(arg) for arg in fargs]\n return [fnamestr]+farglist\n return expstr", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def evaluate(node,operators):\n\tif isinstance(node, ast.Num):\n\t\treturn node.n\n\telif isinstance(node, ast.BinOp):\n\t\treturn operators[type(node.op)](evaluate(node.left,operators), evaluate(node.right,operators))\n\telif isinstance(node, ast.UnaryOp):\n\t\treturn operators[type(node.op)](evaluate(node.operand,operators))\n\telse:\n\t\traise TypeError(node)", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ZfitBaseVariable, operator, _run_op)", "def convert(value):\n if isinstance(value, (Function, NodeBase)):\n return value\n\n if callable(value):\n return _convert_tvm_func(value)\n\n return _convert_to_node(value)", "def convert_elemwise(self, op):\n try:\n from 
tflite.Operator import Operator\n from tflite.AddOptions import AddOptions\n from tflite.SubOptions import SubOptions\n from tflite.MulOptions import MulOptions\n from tflite.DivOptions import DivOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) == 2, \"input tensors length should be 2\"\n\n def get_input_nodes(tensor):\n if tensor.tensor_idx in self.tensor_tab:\n # In most cases, we can assume that TOCO fuses elemwise operators\n # with constants - it means both will be tensors.\n return self.tensor_tab[tensor.tensor_idx]\n else:\n # However, in some corner cases, the elemwise operator is not fused,\n # we can receive as constant.\n t_value = self.get_tensor_value(tensor)\n return self.nn_new_const(tensor, t_value)\n\n lhs_nodes = get_input_nodes(input_tensors[0])\n rhs_nodes = get_input_nodes(input_tensors[1])\n\n assert len(lhs_nodes) in [1, 3], \"Nodes list size should be 1 or 3\"\n assert len(lhs_nodes) == len(rhs_nodes), \"Left and right nodes list size should be equal\"\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n # Options (fused_activation_function)\n options = None\n if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:\n op_type = \"Add\"\n options = AddOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:\n op_type = \"Sub\"\n options = SubOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:\n op_type = \"Mul\"\n options = MulOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:\n op_type = \"Div\"\n options = DivOptions()\n\n if options is not None:\n op_options = op.BuiltinOptions()\n options.Init(op_options.Bytes, op_options.Pos)\n fused_activation_fn = options.FusedActivationFunction()\n # if we have activation fn\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Elemwise operators with fused activation are not supported yet.'\n\n out_nodes = self.nn_elemwise(lhs_nodes, rhs_nodes, op_type, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ComposedVariable, operator, _run_op)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return 
SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))", "def eval(node):\n if node.id == '(literal)':\n return node.value\n elif node.id == '(name)':\n return scope[node.value]\n elif node.id == '(':\n name, args = node.children\n name = eval(name)\n args = map(eval, args)\n return name(*args)\n elif node.id == 'and':\n assert len(node.children) == 2\n first = eval(node.children[0])\n if first:\n return eval(node.children[1])\n else:\n return first\n elif node.id == 'or':\n assert len(node.children) == 2\n first = eval(node.children[0])\n if first:\n return first\n else:\n return eval(node.children[1])\n elif node.id == 'not':\n assert len(node.children) == 1\n return not eval(node.children[0])\n elif node.id in prefix_operators and len(node.children) == 1:\n value = eval(node.children[0])\n return prefix_operators[node.id](value)\n elif node.id in operators:\n values = [eval(v) for v in node.children]\n return operators[node.id](*values)\n else:\n raise ValueError('unknown node type', node)", "def str_to_operator(s):\n return {\n # https://docs.python.org/3/library/operator.html#mapping-operators-to-functions\n \"<\": operator.lt,\n \"<=\": operator.le,\n \"==\": operator.eq,\n \"!=\": operator.ne,\n \">=\": operator.ge,\n \">\": operator.gt,\n }[s]", "def evaluator(operator: str, value1: str, value2: str) -> str:\n\n evaluation_function: str = value1 + operator + value2\n #Because all three are strings, the + operator simply appends them together to be simplified. \n\n result: str = str(simplify(evaluation_function))\n return result", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def get_symbol(operator):\r\n if isinstance(operator, AST):\r\n operator = type(operator)\r\n try:\r\n return ALL_SYMBOLS[operator]\r\n except KeyError:\r\n raise LookupError('no known symbol for %r' % operator)", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc", "def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper", "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def op(self) -> str:\n return self._node.get(\"op\")", "def do_math(operator, op1, op2):\n if operator == \"*\":\n return op1 * op2\n if operator == \"/\":\n return op1 / op2\n if operator == \"+\":\n return op1 + op2\n if operator == \"-\":\n return op1 - op2\n if operator == \"^\":\n return op1**(op2)", "def convert_unary_op(g, op, block):\n\n # op_map stores mapping relationship between paddlepaddle and relay\n op_map = {\"isinf_v2\": _op.isinf, 
\"isfinite_v2\": _op.isfinite, \"isnan_v2\": _op.isnan}\n if op.type in op_map:\n unary_func = op_map[op.type]\n else:\n # while paddle operator's name is same with relay\n unary_func = get_relay_op(op.type)\n out = unary_func(g.get_node(op.input(\"X\")[0]))\n g.add_node(op.output(\"Out\")[0], out)", "def _remove_operator(self, operator):", "def operator(app):\n return car(app)", "def load_operator(descriptor_operator_bytes: bytes):\n assert len(descriptor_operator_bytes) > 0\n function_desc_bytes, module_name, class_name = gateway_client.deserialize(\n descriptor_operator_bytes\n )\n if function_desc_bytes:\n return create_operator_with_func(function.load_function(function_desc_bytes))\n else:\n assert module_name\n assert class_name\n mod = importlib.import_module(module_name)\n cls = getattr(mod, class_name)\n logger.info(f\"Load cls type {cls}, {class_name} {mod}\")\n from raystreaming.operator import Operator\n\n assert issubclass(cls, Operator)\n return cls()", "def __call__(self):\n new_node = Op.__call__(self)\n return new_node", "def get_node_target(submodules: Mapping[str, torch.nn.Module], node: pippy.fx.Node) -> str:\n\n assert node.op in CALLABLE_NODE_OPS, (\n \"Expect op types of \" + \", \".join(CALLABLE_NODE_OPS) + f\", but found {node.op}\"\n )\n\n if node.op == \"call_module\":\n assert isinstance(node.target, str)\n submod = submodules[node.target]\n submod_type = getattr(submod, \"_base_class_origin\", type(submod))\n return get_acc_ops_name(submod_type)\n elif node.op == \"call_function\":\n target: Any = node.target\n return (\n f\"acc_ops.{target.__name__}\"\n if target.__module__ is not None and \"acc_ops\" in target.__module__\n else _get_qualified_name(target)\n )\n else:\n assert isinstance(node.target, str)\n return node.target", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def _calculate(self, node):\n if isinstance(node, ast.Num): # <number>\n return node.n\n elif isinstance(node, ast.BinOp): # <left> <operator> <right>\n return self._operators[type(node.op)](\n self._calculate(node.left),\n self._calculate(node.right)\n )\n elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1\n return self._operators[type(node.op)](self._calculate(node.operand))\n else:\n raise TypeError(node)", "def _extract_ops_from_onnx_graph(graph, operators, domain_opset_map):\n\n for operator in graph.node:\n # empty domain is used as an alias for 'ai.onnx'\n domain = operator.domain if operator.domain else \"ai.onnx\"\n\n if domain not in operators or domain not in domain_opset_map:\n continue\n\n operators[domain][domain_opset_map[domain]].add(operator.op_type)\n\n for attr in operator.attribute:\n if attr.type == onnx.AttributeProto.GRAPH: # process subgraph\n _extract_ops_from_onnx_graph(attr.g, operators, domain_opset_map)\n elif attr.type == onnx.AttributeProto.GRAPHS:\n # Currently no ONNX operators use GRAPHS.\n # Fail noisily if we encounter this so we can implement support\n raise RuntimeError(\"Unexpected attribute proto of GRAPHS\")", "def ops(rule):\n ops_dict = {'>' : operator.gt,\n '<' : operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=' : operator.eq,\n '==' : operator.eq}\n return ops_dict[rule]", "def singa_op_to_onnx_node(cls, op, op_t):\n optype = cls._get_singa_op_type(op)\n # wether the operator needs special handler\n if optype in cls._special_operators:\n translator = getattr(cls, cls._special_operators[optype])\n else:\n translator = cls._common_singa_tensor_to_onnx_node\n nodes = 
translator(op, op_t)\n if not isinstance(nodes, collections.Iterable):\n nodes = [nodes]\n nodes = [node for node in nodes if node is not None]\n return nodes", "def test_function_statement_at_operator():\n r = convert_code(\"{@foo arg1=bar arg2=3}\")\n assert r == \"{{ {'arg1': bar, 'arg2': 3}|foo }}\"", "def get_func_ast(obj : types.FunctionType):\n return get_ast(obj).body[0]", "def subexpr_to_smtlib(expr, pre, suff='', fun_annotate_subexpr = None):\n if fun_annotate_subexpr is not None and pre in PythonOperators.logic_ops:\n return '(! (' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + \\\n ') :named ' + fun_annotate_subexpr() + ')'\n else:\n return '(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + ')'", "def expr(s):\n if isinstance(s, Expr): return s\n if isnumber(s): return Expr(s)\n ## Replace the alternative spellings of operators with canonical spellings\n s = s.replace('==>', '>>').replace('<==', '<<')\n s = s.replace('<=>', '%').replace('=/=', '^')\n ## Replace a symbol or number, such as 'P' with 'Expr(\"P\")'\n s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr(\"\\1\")', s)\n ## Now eval the string. (A security hole; do not use with an adversary.)\n return eval(s, {'Expr':Expr})", "def RunOperator(op_def):\n RunOperatorCC(_stringify_proto(op_def))", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def convert(tree) :\n kind = tree[0]\n\n if kind == \"dot\" :\n return \"dot\" \n elif kind == \"eol\" :\n return \"eol\"\n elif kind == \"char\" :\n return \"lit('\" + tree[1] + \"')\"\n elif kind == \"set\" :\n return \"oneof('\" + tree[1] + \"')\"\n elif kind == \"elem\" :\n if len(tree) >= 3 :\n return convert(tree[2]) \n else :\n return convert(tree[1])\n elif kind == \"basic\" :\n if len(tree) == 4 :\n return \"alt(\" + convert(tree[1]) + \",\" + convert(tree[3]) + \")\"\n elif len(tree) == 3 :\n return parse_single_op_string(tree[2]) + convert(tree[1]) + \")\"*len(tree[2])\n else :\n return convert(tree[1])\n elif kind == \"RE\" :\n if len(tree) == 3 and tree[2][1][0] != 'eol' :\n return \"seq(\" + convert(tree[1]) + \",\" + convert(tree[2]) + \")\"\n else :\n return convert(tree[1])\n else :\n print \"invalid node tag : {}\".format(kind)", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def _get_np_op(name):\n for mod in _ONP_OP_MODULES:\n op = getattr(mod, name, None)\n if op is not None:\n return op\n raise ValueError('Operator `{}` is not supported by `mxnet.numpy`.'.format(name))", "def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. 
\"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)", "def find_label_operator(query):\n # If you apply any changes into these regex patterns, please update the JSON schema consequently at:\n # depc/schemas/v1_config.json\n # Rule\n regex = r\"^rule.(.+|'.+')$\"\n match = re.search(regex, query)\n if match:\n rule = match.group(1)\n if rule.startswith(\"'\"):\n rule = rule[1:-1]\n return RuleOperator, {\"rule\": rule}\n\n # Operation AND, OR (no argument)\n regex = (\n r\"^operation.(AND|OR)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n )\n match = re.search(regex, query)\n if match:\n # Transform '[Foo, Bar]' into a Python list\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation ATLEAST (integer argument)\n regex = r\"^operation.(ATLEAST\\([0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation RATIO (float integer less than 0)\n regex = r\"^operation.(RATIO\\(0.[0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Aggregation AVERAGE, MIN, MAX\n regex = r\"^aggregation.(AVERAGE|MIN|MAX)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return AggregationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # We validate the schema before save it in database,\n # it's not possible to go here.\n return None, None", "def operator_to_vector(op):\n if op.type in ['super', 'operator-ket', 'operator-bra']:\n raise TypeError(\"Cannot convert object already \"\n \"in super representation\")\n return Qobj(stack_columns(op.data),\n dims=[op.dims, [1]],\n type='operator-ket',\n superrep=\"super\",\n copy=False)", "def nn_to_rpn(self, nn):\n expression = []\n ops = []\n\n # handle +-*/) to add a space before and after the operator\n nn = nn.strip()\n nn = re.sub(r\"(?P<operator>[+\\-*/])\", add_spaces_operator, nn)\n # handle the wrongly replaced \" * * \"(maybe many spaces around *) to \"**\"\n nn = re.sub(r\" *\\* {2}\\* *\", \"**\", nn)\n nn = re.sub(r\"(?P<operator>[(])\", add_spaces_left_bracket, nn)\n nn = re.sub(r\"(?P<operator>[)])\", add_spaces_right_bracket, nn)\n items = re.split(r\"\\s+\", nn)\n for item in items:\n if item in [\"+\", \"-\", \"*\", \"/\"]:\n while len(ops) >= 0:\n if len(ops) == 0:\n ops.append(item)\n break\n op = ops.pop()\n if op == \"(\" or self.ops_rule[item] > self.ops_rule[op]:\n ops.append(op)\n ops.append(item)\n break\n else:\n expression.append(op)\n elif item == \"(\":\n ops.append(item)\n elif item == \")\":\n while len(ops) > 0:\n op = ops.pop()\n if op == \"(\":\n break\n else:\n expression.append(op)\n else:\n expression.append(item)\n\n while len(ops) > 0:\n expression.append(ops.pop())\n\n return expression", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This 
can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)", "def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n \"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # 
the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def replace_operators_by_calls(topconstruct, opname, call, call_id_construct):\n # find all computations\n for computation in query([is_computation], TreeItem(topconstruct)):\n replace_op_by_call(computation.construct, opname, call, call_id_construct)", "def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)", "def get_binary_op_str(bin_op_node):\n\n if isinstance(bin_op_node, ast.Add):\n return \"+\"\n\n elif isinstance(bin_op_node, ast.Sub):\n return \"-\"\n\n elif isinstance(bin_op_node, ast.Mult):\n return \"*\"\n\n elif isinstance(bin_op_node, ast.Div):\n return \"/\"\n\n elif isinstance(bin_op_node, ast.Mod):\n return \"%\"\n\n elif isinstance(bin_op_node, ast.Pow):\n return \"**\"\n\n elif isinstance(bin_op_node, ast.LShift):\n return \"<<\"\n\n elif isinstance(bin_op_node, ast.RShift):\n return \">>\"\n\n else:\n raise ValueError(\"No string defined for binary operator node %s\" % \\\n bin_op_node.__class__.__name__)", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def __init__(self, opToken, leftOper, rightOper):\n self.operator = opToken\n self.leftOperand = leftOper\n self.rightOperand = rightOper", "def get_operator(key):\n # Check for simple operators\n if key.startswith('re_'):\n operator = np.real\n newkey = key[3:]\n elif key.startswith('im_'):\n operator = np.imag\n newkey = key[3:]\n elif key.startswith('abs_'):\n operator = np.abs\n newkey = key[4:] \n else:\n operator = None \n newkey = key\n \n return operator, newkey", "def rename(op_name):\n return type(op_name, (OpConverter,), {})", "def _basic_operators_init():\n global BASIC_OPERATORS\n\n BASIC_OPERATORS = {\n \"angle_between\": {\n \"node\": \"angleBetween\",\n \"inputs\": [\n [\"vector1X\", \"vector1Y\", \"vector1Z\"],\n [\"vector2X\", \"vector2Y\", \"vector2Z\"],\n ],\n \"outputs\": [\n [\"angle\"],\n ],\n },\n\n \"average\": {\n \"node\": 
\"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": 3,\n },\n\n \"blend\": {\n \"node\": \"blendColors\",\n \"inputs\": [\n [\"color1R\", \"color1G\", \"color1B\"],\n [\"color2R\", \"color2G\", \"color2B\"],\n [\"blender\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"choice\": {\n \"node\": \"choice\",\n \"inputs\": [\n [\"input[{array}]\"],\n [\"selector\"],\n ],\n \"outputs\": [\n [\"output\"],\n ],\n },\n\n \"clamp\": {\n \"node\": \"clamp\",\n \"inputs\": [\n [\"inputR\", \"inputG\", \"inputB\"],\n [\"minR\", \"minG\", \"minB\"],\n [\"maxR\", \"maxG\", \"maxB\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"compose_matrix\": {\n \"node\": \"composeMatrix\",\n \"inputs\": [\n [\"inputTranslateX\", \"inputTranslateY\", \"inputTranslateZ\"],\n [\"inputRotateX\", \"inputRotateY\", \"inputRotateZ\"],\n [\"inputScaleX\", \"inputScaleY\", \"inputScaleZ\"],\n [\"inputShearX\", \"inputShearY\", \"inputShearZ\"],\n [\"inputRotateOrder\"],\n [\"useEulerRotation\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"decompose_matrix\": {\n \"node\": \"decomposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputTranslateX\", \"outputTranslateY\", \"outputTranslateZ\"],\n [\"outputRotateX\", \"outputRotateY\", \"outputRotateZ\"],\n [\"outputScaleX\", \"outputScaleY\", \"outputScaleZ\"],\n [\"outputShearX\", \"outputShearY\", \"outputShearZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"inverse_matrix\": {\n \"node\": \"inverseMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"length\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"point1X\", \"point1Y\", \"point1Z\"],\n [\"point2X\", \"point2Y\", \"point2Z\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"matrix_distance\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"inMatrix1\"],\n [\"inMatrix2\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"mult_matrix\": {\n \"node\": \"multMatrix\",\n \"inputs\": [\n [\n \"matrixIn[{array}]\"\n ],\n ],\n \"outputs\": [\n [\"matrixSum\"],\n ],\n },\n\n \"normalize_vector\": {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": 0,\n },\n\n \"pair_blend\": {\n \"node\": \"pairBlend\",\n \"inputs\": [\n [\"inTranslateX1\", \"inTranslateY1\", \"inTranslateZ1\"],\n [\"inRotateX1\", \"inRotateY1\", \"inRotateZ1\"],\n [\"inTranslateX2\", \"inTranslateY2\", \"inTranslateZ2\"],\n [\"inRotateX2\", \"inRotateY2\", \"inRotateZ2\"],\n [\"weight\"],\n [\"rotInterpolation\"],\n ],\n \"outputs\": [\n [\"outTranslateX\", \"outTranslateY\", \"outTranslateZ\"],\n [\"outRotateX\", \"outRotateY\", \"outRotateZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"point_matrix_mult\": {\n \"node\": \"pointMatrixMult\",\n \"inputs\": [\n [\"inPointX\", \"inPointY\", \"inPointZ\"],\n [\"inMatrix\"],\n [\"vectorMultiply\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n },\n\n \"remap_value\": {\n \"node\": \"remapValue\",\n \"inputs\": [\n [\"inputValue\"],\n [\"outputMin\"],\n [\"outputMax\"],\n [\"inputMin\"],\n [\"inputMax\"],\n ],\n \"outputs\": [\n 
[\"outValue\"],\n ],\n },\n\n \"set_range\": {\n \"node\": \"setRange\",\n \"inputs\": [\n [\"valueX\", \"valueY\", \"valueZ\"],\n [\"minX\", \"minY\", \"minZ\"],\n [\"maxX\", \"maxY\", \"maxZ\"],\n [\"oldMinX\", \"oldMinY\", \"oldMinZ\"],\n [\"oldMaxX\", \"oldMaxY\", \"oldMaxZ\"],\n ],\n \"outputs\": [\n [\"outValueX\", \"outValueY\", \"outValueZ\"],\n ],\n },\n\n \"transpose_matrix\": {\n \"node\": \"transposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n }\n\n # Fill BASIC_OPERATORS with condition operations\n cond_operators = [\"eq\", \"ne\", \"gt\", \"ge\", \"lt\", \"le\"]\n for i, condition_operator in enumerate(cond_operators):\n BASIC_OPERATORS[condition_operator] = {\n \"node\": \"condition\",\n \"inputs\": [\n [\"firstTerm\"],\n [\"secondTerm\"],\n ],\n # The condition node is a special case! It gets created during\n # the magic-method-comparison and fully connected after being\n # passed on to the condition()-method in this OperatorMetaClass\n \"outputs\": [\n [None],\n ],\n \"operation\": i,\n }\n\n # Fill BASIC_OPERATORS with +,- operations\n for i, add_sub_operator in enumerate([\"add\", \"sub\"]):\n BASIC_OPERATORS[add_sub_operator] = {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with *,/,** operations\n for i, mult_div_operator in enumerate([\"mul\", \"div\", \"pow\"]):\n BASIC_OPERATORS[mult_div_operator] = {\n \"node\": \"multiplyDivide\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with vectorProduct operations\n for i, vector_product_operator in enumerate([\"dot\", \"cross\"]):\n BASIC_OPERATORS[vector_product_operator] = {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }", "def from_name(self, name):\n return self._name_to_operator.get(name.lower())", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def __init__(self):\n self.operators_dict = {\n '+': {'function': lambda x=0, y=0: x + y, 'priority': 4},\n '-': {'function': lambda x=0, y=0: x - y, 'priority': 4},\n '*': {'function': lambda x, y: x * y, 'priority': 3},\n '/': {'function': lambda x, y: Error(id=8, arg='/') if y == 0 else x/y, 'priority': 3},\n '%': {'function': lambda x, y: x % y, 'priority': 3},\n '//': {'function': lambda x, y: x // y, 'priority': 3},\n '^': {'function': lambda x, y: Error(id=7, arg='^') if x < 0 and isinstance(y, float) else x ** y, 'priority': 1},\n '==': {'function': lambda x, y: x == y, 'priority': 9},\n '!=': {'function': lambda x, y: x != y, 'priority': 9},\n '>': {'function': lambda x, y: x > y, 'priority': 9},\n '<': {'function': lambda x, y: x < y, 'priority': 9},\n '>=': {'function': lambda x, y: x >= y, 'priority': 9},\n '<=': {'function': lambda x, y: x <= y, 'priority': 9},\n }", "def gen_node_script(env: jinja2.environment.Environment, graph: onnx.GraphProto, node: onnx.NodeProto) \\\n -> operator_gen.GeneratedScriptPart:\n try:\n return 
operator_generators[node.op_type](env, graph, node)\n except KeyError as error:\n print(\"Operator \" + str(node.op_type) + \" not supported\")\n raise error", "def getOperatorName(self):\n return _libsbml.ASTNode_getOperatorName(self)", "def do_oprn(self, *args, operator=None, **kwargs):\n\t\tself.operator = operator\n\n\t\tif not self.operator:\n\t\t\treturn f'No operator provided'\n\n\t\tif self.operator == '+':\n\t\t\treturn self.sum(*args, **kwargs)\n\t\telif self.operator == '-':\n\t\t\treturn self.subtract(*args, **kwargs)\n\t\telif self.operator == '*':\n\t\t\treturn self.multiple(*args, **kwargs)\n\t\telif self.operator == '/':\n\t\t\treturn self.division(*args, **kwargs)\n\t\telse:\n\t\t\treturn f'Currently Operator ({operator}) is not Applicable'", "def special_math_func(state, other, operator):\n if not hasattr(other, '__iter__'):\n # other is just a number\n results = [getattr(state[each], operator)(other)\n for each in state.keys()]\n else:\n try:\n # Both are dictionaries\n results = [getattr(state[each], operator)(other[each])\n for each in state]\n except IndexError:\n # Both are iterables, but other is not a dictionary\n results = [getattr(state[i], operator)(j)\n for i, j in zip(state, other)]\n out = State(zip(state.keys(), results))\n return out", "def fun(op, v1, v2):\n if op == '+':\n return v1+v2\n elif op == '-':\n return v1-v2\n elif op == '*':\n return v1*v2\n elif op == '/':\n return v1", "def _(self, node: BinaryOp):\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n return f\"( {node.op} {left} {right} )\"", "def apply_math_operations(operand1, operand2, operator):\n\t\tlogger.info(\"in the apply math\")\n\t\tif operator == \"+\":\n\t\t\tresult = operand1 + operand2\n\t\t\treturn result\n\n\t\telif operator == \"-\":\n\t\t\tresult = operand1 - operand2\n\t\t\treturn result\n\n\t\telif operator == \"*\":\n\t\t\tresult = operand1 * operand2\n\t\t\treturn result\n\n\t\telif operator == \"/\":\n\t\t\tresult = operand1 / operand2\n\t\t\treturn result\n\t\telse:\n\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\traise Exception(\"Not a valid operator\")", "def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15", "def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n 
return result\n self.raise_error()", "def math_operation(expression):\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n # eliminates the error call for float and negative numbers\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit() or \\\n not str(expression[2]).replace('.', '1').replace('-', '1').isdigit():\n raise ValueError(f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0]**expression[2]\n elif operator == '*':\n return expression[0]*expression[2]\n elif operator == '/':\n return expression[0]/expression[2]\n elif operator == '+':\n return expression[0]+expression[2]\n elif operator == '-':\n return expression[0]-expression[2]", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)" ]
[ "0.7269387", "0.68856734", "0.67033535", "0.6680751", "0.6593878", "0.65538615", "0.6333541", "0.6330422", "0.63046694", "0.6274489", "0.5995828", "0.5922164", "0.5883966", "0.5829021", "0.5820589", "0.5794242", "0.5792682", "0.57558066", "0.5689504", "0.5672094", "0.56409925", "0.5610246", "0.56021297", "0.5596652", "0.55908126", "0.558397", "0.5561989", "0.55503595", "0.5549611", "0.5534024", "0.55273044", "0.54946715", "0.54902184", "0.5467073", "0.54543746", "0.54469305", "0.5444672", "0.54402345", "0.5439819", "0.5411757", "0.54056454", "0.5403585", "0.5400465", "0.5393477", "0.53863835", "0.5371389", "0.53634405", "0.5354086", "0.5353548", "0.5337064", "0.5318819", "0.53086066", "0.5305684", "0.5302567", "0.5287647", "0.52869153", "0.52849597", "0.52709866", "0.5265015", "0.5262606", "0.52509624", "0.5241238", "0.5239894", "0.5236687", "0.5234725", "0.52337027", "0.5228746", "0.52270436", "0.5220382", "0.5218638", "0.521782", "0.5207852", "0.5203226", "0.5194528", "0.5193524", "0.5173571", "0.5169008", "0.5162824", "0.51577455", "0.515708", "0.51494604", "0.51473427", "0.5145131", "0.5143043", "0.5141935", "0.5132619", "0.5131224", "0.51311415", "0.5125562", "0.511568", "0.51155436", "0.5113618", "0.5113046", "0.5110037", "0.5108707", "0.51055276", "0.5102828", "0.5098719", "0.5094633", "0.509238" ]
0.740287
0
Convert unary operator to function call.
def visit_UnaryOp(self, node):
    self.generic_visit(node)
    if isinstance(node.operand, ast.Num):
        # Don't transform negations of numeric literals. Just treat them
        # as literals.
        return node
    return to_call(self.op_to_function(node.op), [node.operand])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator", "def cg_inline_unary(self, fn):\n if fn == 'neg':\n op = '-'\n elif fn == 'not':\n op = '!'\n else:\n raise ValueError(f\"Unknown unary operator: {fn}\")\n self.asm(unindent(f\"\"\"\n @SP\n AM=M-1 // SP--\n D={op}M // D = MEM[SP]\n {self._cg_push_D}\n \"\"\"))", "def convert_unary_op(g, op, block):\n\n # op_map stores mapping relationship between paddlepaddle and relay\n op_map = {\"isinf_v2\": _op.isinf, \"isfinite_v2\": _op.isfinite, \"isnan_v2\": _op.isnan}\n if op.type in op_map:\n unary_func = op_map[op.type]\n else:\n # while paddle operator's name is same with relay\n unary_func = get_relay_op(op.type)\n out = unary_func(g.get_node(op.input(\"X\")[0]))\n g.add_node(op.output(\"Out\")[0], out)", "def _UnaryOp(self, t):\n self.write(\"(\")\n self.write(self.unop[t.op.__class__.__name__])\n self.dispatch(t.operand)\n self.write(\")\")", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def unary_op(self):\n return plist([op(x) for x in self], root=self.__root__)", "def is_unary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol()\n # ret-type operator [++ --](int)\n # globally\n # ret-type operator symbol( arg )\n # ret-type operator [++ --](X&, int)\n symbols = ['!', '&', '~', '*', '+', '++', '-', '--']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 0 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] and \\\n isinstance(oper.arguments[0].type, cpptypes.int_t):\n return True\n else:\n return False\n else:\n if 1 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] \\\n and 2 == len(oper.arguments) \\\n and isinstance(oper.arguments[1].type, cpptypes.int_t):\n # may be I need to add additional check whether first argument is\n # reference or not?\n return True\n else:\n return False", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def visit_UnaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return 
self.visit(node.right)\n if token.type == MINUS:\n return -1 * self.visit(node.right)\n self.raise_error()", "def visit_UnaryOperator(self, node: UnaryOperator) -> Constant:\n\n operator = node.operator.type\n if operator == TokenType.PLUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(+expression.constant))\n elif operator == TokenType.MINUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(-expression.constant))", "def __call__(self, *args):\n assert is_symbol(self.op) and not self.args\n return Expr(self.op, *args)", "def unary_wrap(run):\n\n def run_unary(transitions, input, steps):\n return run(transitions, '1' * input, steps)\n\n return run_unary", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def unary_unary(self,\n method,\n request_serializer=None,\n response_deserializer=None):\n return UnaryUnaryMultiCallable(self._channel, _common.encode(method),\n request_serializer,\n response_deserializer)", "def unary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node:\n input_node = as_node(input_value)\n node = node_factory_function(input_node, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, 
node_name)\n\n return sym", "def _build_unary_op(op):\n def unary_op(self):\n \"\"\"`plist` unary operation; applied element-wise to `self`.\n\n `unary_op` is not callable directly from `plist`. It implements the various\n python unary operations: `-`, `~`, `abs`, etc. The unary operators\n can be called directly with their corresponding 'magic' functions,\n `plist.__neg__`, `plist.__invert__`, `plist.__abs__`, etc., but are generally just\n called implicitly.\n\n Examples:\n ```python\n foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])\n (foos.bar == 0).baz = 3 + (foos.bar == 0).foo\n (foos.bar == 1).baz = 6\n\n assert ((-foos.foo).aslist() ==\n [0, -1, -2])\n assert ((~foos.foo).aslist() ==\n [-1, -2, -3])\n\n by_bar = foos.bar.groupby()\n\n assert ((-by_bar.foo).aslist() ==\n [[0, -2], [-1]])\n assert ((~by_bar.foo).aslist() ==\n [[-1, -3], [-2]])\n ```\n\n Returns:\n A new `plist`, where each element of `self` had the operation passed to\n `_build_unary_op` applied to it.\n \"\"\"\n return plist([op(x) for x in self], root=self.__root__)\n\n return unary_op", "def gen_unop(self, expr: expressions.UnaryOperator):\n if expr.op in [\"x++\", \"x--\", \"--x\", \"++x\"]:\n # Increment and decrement in pre and post form\n # Determine increment or decrement:\n op = expr.op[1]\n pre = expr.op[0] == \"x\"\n value = self.gen_inplace_mutation(expr, op, pre)\n elif expr.op == \"*\":\n value = self.gen_expr(expr.a, rvalue=True)\n assert expr.lvalue\n elif expr.op == \"&\":\n assert expr.a.lvalue\n value = self.gen_expr(expr.a, rvalue=False)\n elif expr.op in [\"-\", \"~\"]:\n a = self.gen_expr(expr.a, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n value = self.emit(ir.Unop(expr.op, a, \"unop\", ir_typ))\n elif expr.op in [\"!\"]:\n value = self.gen_condition_to_integer(expr)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def representsUnaryFunction(self, *args):\n return _libsbml.ASTBasePlugin_representsUnaryFunction(self, *args)", "def _remove_operator(self, operator):", "def expand_callable(self, call_expr):\n call_expr.func = ast.Attribute(value=call_expr.func, attr='__call__')", "def reduce(self, binary_operator):\n return functools.reduce(binary_operator, self)", "def _calc2call(func):\n def _converter(inp, *x):\n if func.n_inputs == 1:\n retvals = func.evaluate(x[0], *inp)\n else:\n retvals = func.evaluate(x[0], x[1], *inp)\n return retvals\n return _converter", "def test02_unary_math_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n n = number(20)\n n += number(10)\n n -= number(10)\n n *= number(10)\n n /= number(2)\n assert n == number(100)\n\n nn = -n;\n assert nn == number(-100)", "def special_unary(sv, tree):\r\n o=tree[0]\r\n if o==Old: # special case: 'old' without argument\r\n return (Old, tree_build(sv, sv.Current_clause), None) # recover object name\r\n \r\n elif o == All: # without args for show\r\n return tree\r\n \r\n elif o in [Begin, End]: # begin(any( )), end(any( )), begin(all( )), end(all( ))\r\n A=tree[1]\r\n if A and A[0] in [Any, All]: # switch operators\r\n sw= (A[0], (o, A[1], None), None)\r\n warn(\"\\n\"+Warn_switch_any_all+ \\\r\n \" '\"+tree_join(tree)+\"' --> '\"+tree_join(sw)+\"'\") # here tree_join recreates a name for display only\r\n # *** Warning: compiler replaced ... 
*** \r\n return sw\r\n \r\n elif o in Unary and not o in sv.Object: # redefined internal function without args is ok\r\n print(Err_missing_args) # *** Error: missing argument ***\r\n print(o, \"???\", sv.Current_clause)\r\n raise ReferenceError \r\n return tree", "def visit_UnaryOpNode(self, node: UnaryOpNode, symbol_table: SymbolTable) -> Number:\n number = self.visit(node.node, symbol_table)\n\n if node.op_tok.token_type == TokenType.MINUS:\n return number * Number(-1)\n elif node.op_tok.token_type == TokenType.PLUS:\n return number\n elif node.op_tok.value == 'not':\n return number.notted_by()", "def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ZfitBaseVariable, operator, _run_op)", "def _generate_unary_deferer(op_func):\n\n def deferer(self, *args, **kwargs):\n return type(self)._defer_unary_elementwise(\n self, op_func, *args, **kwargs\n )\n\n return deferer", "def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)", "def do_oprn(self, *args, operator=None, **kwargs):\n\t\tself.operator = operator\n\n\t\tif not self.operator:\n\t\t\treturn f'No operator provided'\n\n\t\tif self.operator == '+':\n\t\t\treturn self.sum(*args, **kwargs)\n\t\telif self.operator == '-':\n\t\t\treturn self.subtract(*args, **kwargs)\n\t\telif self.operator == '*':\n\t\t\treturn self.multiple(*args, **kwargs)\n\t\telif self.operator == '/':\n\t\t\treturn self.division(*args, **kwargs)\n\t\telse:\n\t\t\treturn f'Currently Operator ({operator}) is not Applicable'", "def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs", "def cg_inline_arith(self, fn):\n if fn == 'add':\n op = '+'\n elif fn == 'sub':\n op = '-'\n elif fn == 'and':\n op = '&'\n elif fn == 'or':\n op = '|'\n else:\n raise ValueError(f\"Unknown arithmetic function: {fn}\")\n self.asm(unindent(f\"\"\"\n {self._cg_pop_D}\n @SP\n AM=M-1 // SP--\n M=M{op}D // D = MEM[SP] {op} D\n @SP // SP++\n AM=M+1\n \"\"\"))", "def _arithmetize1(self, operand: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n # Data length might be changed after evaluation\n # operand = recycle_value(operand, self.data.shape[0])\n return op_func(operand)", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: 
disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ComposedVariable, operator, _run_op)", "def to_op(self):\n raise NotImplementedError", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def visit_unary(spec):", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def _call(self, x):\n return x.ufuncs.sign()", "def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. 
\"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)", "def test_unary_op_support():\n check_peval_expression(\"+(2)\", {}, \"2\", fully_evaluated=True, expected_value=2)\n check_peval_expression(\"-(-3)\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression_bool(\"not 0\", {}, True)\n check_peval_expression(\"~(-4)\", {}, \"3\", fully_evaluated=True, expected_value=3)", "def visit_BinOp(self, node):\n self.generic_visit(node)\n return to_call(self.op_to_function(node.op), [node.left, node.right])", "def function_application(func):\n if func not in NUMEXPR_MATH_FUNCS:\n raise ValueError(\"Unsupported mathematical function '%s'\" % func)\n\n def mathfunc(self):\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{func}({expr})\".format(func=func, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{func}(x_0)\".format(func=func), (self,))\n return mathfunc", "def unaryFunctionGenerator(op, operationName):\n def unaryFunction(memoryManager, paramsList):\n def unaryOperation(a):\n if a is None:\n return None\n if type(a) is not float:\n raise Exception(\"Cannot {} nested list\".format(operationName))\n return op(a)\n\n handleEmpty(paramsList, operationName)\n A = paramsList[0]\n \n if type(A) == float:\n return unaryOperation(A)\n\n lengthA = len(A)\n\n result = []\n for i in range(lengthA):\n result.append(unaryOperation(A[i]))\n return result\n\n return unaryFunction", "def is_unary(*args):\n return _ida_hexrays.is_unary(*args)", "def unary_ops(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n # TODO: extend to all types\n exprs = ctx.expressions_by_type(int)\n for expr in exprs:\n for unary_operator in self.unary_operators:\n yield AnnotatedExpression(\n ast.UnaryOp(op=unary_operator(), operand=expr.expr),\n TypeAnnotation(int),\n )", "def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def _is_unary_op(op):\n if op.type == TokenType.BitwiseNot:\n return True\n return False", "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = 
self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def visit_UnaryOp(self, node):\n if node and not config.mutated:\n return self.visit_node(node)\n elif node and config.mutated and config.recovering:\n return self.recover_node(node)\n return node", "def evaluate(expression):\n if isinstance(expression, int):\n return expression\n elif isinstance(expression, str): # operator\n try:\n return operators[expression]\n except KeyError:\n raise InvalidOperator(expression)\n else:\n exps = [evaluate(exp) for exp in expression]\n if len(exps) == 0:\n raise NullExpression()\n operator = exps.pop(0)\n if callable(operator):\n if len(exps) == 2:\n arg1, arg2 = exps\n return operator(arg1, arg2)\n elif len(exps) < 2:\n raise MissingArguments()\n else:\n raise TooManyArguments()\n else:\n raise InvalidOperator(operator)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except 
TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def _unwrap_simple_call(self, node: ast.expr) -> ast.expr:\n if isinstance(node, ast.Call) and len(node.args) == 1 and not node.keywords:\n return self._unwrap_simple_call(node.args[0])\n return node", "def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op", "def deco_unary_ufunc(torch_func):\n\n @normalizer\n def wrapped(\n x: ArrayLike,\n /,\n out: Optional[OutArray] = None,\n *,\n where=True,\n casting: Optional[CastingModes] = \"same_kind\",\n order=\"K\",\n dtype: Optional[DTypeLike] = None,\n subok: NotImplementedType = False,\n signature=None,\n extobj=None,\n ):\n if dtype is not None:\n x = _util.typecast_tensor(x, dtype, casting)\n\n if torch_func.__name__ in _fp_unary:\n x = _util.cast_int_to_float(x)\n\n result = torch_func(x)\n result = _ufunc_postprocess(result, out, casting)\n return result\n\n wrapped.__qualname__ = torch_func.__name__\n wrapped.__name__ = torch_func.__name__\n\n return wrapped", "def __call__(fun_name):", "def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc", "def eval_ops(opcodes):\n output = []\n for op in opcodes:\n if op in [\"+\", \"*\"]:\n b = output.pop(-1)\n a = output.pop(-1)\n value = ops[op](a, b)\n output.append(value)\n else:\n output.append(op)\n\n assert len(output) == 1\n return output[0]", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def static_call(self, *args):\n return self.expression", "def _operators_conductor(operator_name, _bool=None):\n func = getattr(Series, operator_name)\n if _bool is None:\n # return bool series.\n _pre, _post = bool, bool\n else:\n # return ints.\n _pre, _post = int, int\n\n @wraps(func)\n def operator_method(self, other=None):\n if other is None:\n # for unary such as pos, neg, invert\n def not_(df: dF):\n return func(df.pipe(self.copy().pop())).apply(_post)\n\n return not_\n\n # if not isinstance(other, Condition):\n # raise TypeError(\"only conditions can add, got %r\" % type(other))\n\n def comb(df: dF) -> Series:\n return func(df.pipe(self).apply(_pre), df.pipe(other).apply(_pre)).apply(_post)\n\n return comb\n\n return operator_method", "def fun(op, v1, v2):\n if op == '+':\n return v1+v2\n elif op == 
'-':\n return v1-v2\n elif op == '*':\n return v1*v2\n elif op == '/':\n return v1", "def convert(self, operator: OperatorBase) -> OperatorBase:\n # pylint: disable=cyclic-import,import-outside-toplevel\n from ..evolutions.evolved_op import EvolvedOp\n\n if isinstance(operator, ListOp):\n if isinstance(operator, SummedOp) and all([isinstance(op, PauliOp)\n for op in operator.oplist]):\n # For now, we only support graphs over Paulis.\n return self.group_subops(operator)\n elif self._traverse:\n return operator.traverse(self.convert)\n else:\n return operator\n elif isinstance(operator, OperatorStateFn) and self._traverse:\n return OperatorStateFn(self.convert(operator.primitive),\n is_measurement=operator.is_measurement,\n coeff=operator.coeff)\n elif isinstance(operator, EvolvedOp) and self._traverse:\n return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff)\n else:\n return operator", "def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)", "def do_math(operator, op1, op2):\n if operator == \"*\":\n return op1 * op2\n if operator == \"/\":\n return op1 / op2\n if operator == \"+\":\n return op1 + op2\n if operator == \"-\":\n return op1 - op2\n if operator == \"^\":\n return op1**(op2)", "def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2", "def _UnaryOperatorVariable(operatorClass=None):\n\n class unOp(operatorClass):\n def _calcValue_(self):\n return self.op(self.var[0].value)\n\n @property\n def unit(self):\n assert(hasattr(self, \"_unit\") == True)\n if self._unit is None:\n try:\n var = self._varProxy\n return self._extractUnit(self.op(var[0]))\n except:\n return self._extractUnit(self._calcValue())\n else:\n return self._unit\n\n return unOp", "def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")", "def unaryop_type(cls, op):\n return None", "def _BinOp(self, t):\n op_name = t.op.__class__.__name__\n # translate pow into function call (no float version)\n if op_name == \"Pow\":\n self.write(\"pow(\")\n self.dispatch(t.left)\n self.write(\", \")\n self.dispatch(t.right)\n self.write(\")\")\n # translate floor div into function call (no float version)\n elif op_name == \"FloorDiv\":\n self.write(\"floor(\")\n self.dispatch(t.left)\n self.write(\"/\")\n self.dispatch(t.right)\n self.write(\")\")\n elif op_name == \"MatMult\":\n self.RaiseError(t, \"Matrix multiplier operator not supported\")\n else:\n 
self.write(\"(\")\n self.dispatch(t.left)\n self.write(\" \" + self.binop[op_name] + \" \")\n self.dispatch(t.right)\n self.write(\")\")", "def trans_op_op(self, data):\n\n return self.trans_op(self.op(data))", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def _support_op(*args):\n def inner(func):\n for one_arg in args:\n _op_mapping_[one_arg] = func\n return func\n\n return inner", "def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)", "def __rrshift__(self, other):\n if isinstance(other, Callable):\n return self @ other\n else:\n return self(other) # Function application", "def stream_unary_inline(behavior):\n return _MethodImplementation(\n cardinality.Cardinality.STREAM_UNARY, style.Service.INLINE, None, None,\n behavior, None, None, None, None, None)", "def function(name: str, expr: vecpy.base.Expr, *args) -> vecpy.base.Function:\n return vecpy.base.Function(name, expr, *args)", "def evaluator(operator: str, value1: str, value2: str) -> str:\n\n evaluation_function: str = value1 + operator + value2\n #Because all three are strings, the + operator simply appends them together to be simplified. \n\n result: str = str(simplify(evaluation_function))\n return result", "def math(oper):\n a=int(request.args.get('a'))\n b=int(request.args.get('b'))\n result = math_oper[oper](a,b)\n return str(result)", "def gate_operation(self, operation, qubit_expr, params=None):\n operation = QuantumCircuit._parse_gate_operation(operation)\n if params is not None:\n operation = operation(**params) \n qubit = self.qr.get(qubit_expr) \n return operation(qubit)", "def convert_to_user_call(*args):\n return _ida_hexrays.convert_to_user_call(*args)", "def RunOperator(op_def):\n RunOperatorCC(_stringify_proto(op_def))", "def replace_operators_by_calls(topconstruct, opname, call, call_id_construct):\n # find all computations\n for computation in query([is_computation], TreeItem(topconstruct)):\n replace_op_by_call(computation.construct, opname, call, call_id_construct)", "def execute_binary_operator(cls, val, x, y):\n\n\t\tif val == 0:\n\t\t\treturn operator.add(x,y)\n\t\telif val == 1:\n\t\t\treturn operator.sub(x,y)\n\t\telif val == 2:\n\t\t\treturn operator.mul(x,y)\n\t\telif val == 3:\n\t\t\treturn operator.div(x,y)\n\t\telif val == 4:\n\t\t\treturn operator.lt(x,y)\n\t\telif val == 5:\n\t\t\treturn operator.gt(x,y)\n\t\telif val == 6:\n\t\t\treturn operator.le(x,y)\n\t\telif val == 7:\n\t\t\treturn operator.ge(x,y)\n\t\telif val == 8:\n\t\t\treturn operator.eq(x,y)\n\t\telif val == 9:\n\t\t\treturn operator.ne(x,y)\n\t\telif val == 12:\n\t\t\treturn operator.mod(x,y)", "def _call_op_sugar(self, op_idx, *args):\n if not all(isinstance(a, six.integer_types) for a in args):\n raise TypeError('All args passed to call_op must be integers '\n '(LoomResult ids.) 
Did you forget to call constant?')\n result = self._weaver.CallOp(op_idx, args)\n if not result:\n raise AssertionError('Weaver op call failed: %s' %\n self._weaver.error_string())\n if len(result) == 1:\n return result[0]\n return result", "def operatorCommand(self, buttonText):\n def applyOperator():\n number = self.digits[\"text\"]\n if number == 'Error':\n return\n if \".\" in number:\n number = float(number)\n else:\n number = int(number)\n self.calculator.applyOperator(buttonText, number)\n self.digits[\"text\"] = str(self.calculator)\n self.operatorEntered = True\n return applyOperator", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def lambda_eval(v):\n return v() if hasattr(v, '__call__') else v", "def calculate(numbers, operator):\n \n if operator == 'add':\n return add(prepare_numbers(numbers))\n elif operator == 'subtract':\n return subtract(prepare_numbers(numbers))\n elif operator == 'multiply':\n return multiply(prepare_numbers(numbers))\n elif operator == 'divide':\n return divide(prepare_numbers(numbers))\n elif operator == 'remainder':\n return remainder(prepare_numbers(numbers))\n elif operator == 'power':\n return power(prepare_numbers(numbers))", "def unary_unary_inline(behavior):\n return _MethodImplementation(\n cardinality.Cardinality.UNARY_UNARY, style.Service.INLINE, behavior,\n None, None, None, None, None, None, None)", "def special_math_func(state, other, operator):\n if not hasattr(other, '__iter__'):\n # other is just a number\n results = [getattr(state[each], operator)(other)\n for each in state.keys()]\n else:\n try:\n # Both are dictionaries\n results = [getattr(state[each], operator)(other[each])\n for each in state]\n except IndexError:\n # Both are iterables, but other is not a dictionary\n results = [getattr(state[i], operator)(j)\n for i, j in zip(state, other)]\n out = State(zip(state.keys(), results))\n return out", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node" ]
[ "0.72016424", "0.694165", "0.6555362", "0.6547756", "0.64053464", "0.63873965", "0.63089716", "0.6237462", "0.6215301", "0.6152752", "0.61284393", "0.6099685", "0.5980428", "0.5957063", "0.5941364", "0.59279454", "0.59157944", "0.590933", "0.5902454", "0.5870478", "0.58691585", "0.5865945", "0.57783", "0.5753523", "0.57420677", "0.57209545", "0.5717904", "0.5699034", "0.56815207", "0.5658828", "0.5655511", "0.56420946", "0.56101966", "0.5604802", "0.56017", "0.5587024", "0.5584051", "0.55763555", "0.5573452", "0.5565016", "0.5553095", "0.5549253", "0.5517259", "0.5497357", "0.5489802", "0.54879904", "0.5482167", "0.54804826", "0.54710734", "0.5440136", "0.54303193", "0.54262", "0.5423181", "0.5413084", "0.54064", "0.5405636", "0.5405605", "0.5404275", "0.5394554", "0.538641", "0.5385492", "0.5383584", "0.5376527", "0.53641313", "0.5361967", "0.5360073", "0.535996", "0.53231364", "0.5321936", "0.5300452", "0.52997816", "0.5298805", "0.5284563", "0.52833945", "0.5280873", "0.5279954", "0.52709943", "0.52700555", "0.5263678", "0.5246284", "0.5239074", "0.5228926", "0.5222601", "0.52203786", "0.5211521", "0.52087915", "0.5201748", "0.51986", "0.51959294", "0.5191496", "0.5184245", "0.51822263", "0.5177372", "0.5168685", "0.51661754", "0.5163292", "0.515384", "0.5146502", "0.5142354", "0.51421624" ]
0.68839115
2
Convert binary operator to function call.
def visit_BinOp(self, node):
    self.generic_visit(node)
    return to_call(self.op_to_function(node.op), [node.left, node.right])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def _BinOp(self, t):\n op_name = t.op.__class__.__name__\n # translate pow into function call (no float version)\n if op_name == \"Pow\":\n self.write(\"pow(\")\n self.dispatch(t.left)\n self.write(\", \")\n self.dispatch(t.right)\n self.write(\")\")\n # translate floor div into function call (no float version)\n elif op_name == \"FloorDiv\":\n self.write(\"floor(\")\n self.dispatch(t.left)\n self.write(\"/\")\n self.dispatch(t.right)\n self.write(\")\")\n elif op_name == \"MatMult\":\n self.RaiseError(t, \"Matrix multiplier operator not supported\")\n else:\n self.write(\"(\")\n self.dispatch(t.left)\n self.write(\" \" + self.binop[op_name] + \" \")\n self.dispatch(t.right)\n self.write(\")\")", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. 
Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def execute_binary_operator(cls, val, x, y):\n\n\t\tif val == 0:\n\t\t\treturn operator.add(x,y)\n\t\telif val == 1:\n\t\t\treturn operator.sub(x,y)\n\t\telif val == 2:\n\t\t\treturn operator.mul(x,y)\n\t\telif val == 3:\n\t\t\treturn operator.div(x,y)\n\t\telif val == 4:\n\t\t\treturn operator.lt(x,y)\n\t\telif val == 5:\n\t\t\treturn operator.gt(x,y)\n\t\telif val == 6:\n\t\t\treturn operator.le(x,y)\n\t\telif val == 7:\n\t\t\treturn operator.ge(x,y)\n\t\telif val == 8:\n\t\t\treturn operator.eq(x,y)\n\t\telif val == 9:\n\t\t\treturn operator.ne(x,y)\n\t\telif val == 12:\n\t\t\treturn operator.mod(x,y)", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym", "def to_op(self):\n raise NotImplementedError", "def _binaryop(self, other, op: str):\n raise NotImplementedError", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ZfitBaseVariable, operator, _run_op)", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n 
ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))", "def do_oprn(self, *args, operator=None, **kwargs):\n\t\tself.operator = operator\n\n\t\tif not self.operator:\n\t\t\treturn f'No operator provided'\n\n\t\tif self.operator == '+':\n\t\t\treturn self.sum(*args, **kwargs)\n\t\telif self.operator == '-':\n\t\t\treturn self.subtract(*args, **kwargs)\n\t\telif self.operator == '*':\n\t\t\treturn self.multiple(*args, **kwargs)\n\t\telif self.operator == '/':\n\t\t\treturn self.division(*args, **kwargs)\n\t\telse:\n\t\t\treturn f'Currently Operator ({operator}) is not Applicable'", "def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)", "def _calc2call(func):\n def _converter(inp, *x):\n if func.n_inputs == 1:\n retvals = func.evaluate(x[0], *inp)\n else:\n retvals = func.evaluate(x[0], x[1], *inp)\n 
return retvals\n return _converter", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ComposedVariable, operator, _run_op)", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def RunOperator(op_def):\n RunOperatorCC(_stringify_proto(op_def))", "def reduce(self, binary_operator):\n return functools.reduce(binary_operator, self)", "def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")", "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def _operators_conductor(operator_name, _bool=None):\n func = getattr(Series, operator_name)\n if _bool is None:\n # return bool series.\n _pre, _post = bool, bool\n else:\n # return ints.\n _pre, _post = int, int\n\n @wraps(func)\n def operator_method(self, other=None):\n if other is None:\n # for unary such as pos, neg, invert\n def not_(df: dF):\n return func(df.pipe(self.copy().pop())).apply(_post)\n\n return not_\n\n # if not isinstance(other, Condition):\n # raise TypeError(\"only conditions can add, got %r\" % type(other))\n\n def comb(df: dF) -> Series:\n return func(df.pipe(self).apply(_pre), 
df.pipe(other).apply(_pre)).apply(_post)\n\n return comb\n\n return operator_method", "def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def is_binary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol(arg)\n # globally\n # ret-type operator symbol( arg1, arg2 )\n symbols = [\n ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',\n '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',\n '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 1 == len(oper.arguments):\n return True\n else:\n return False\n else:\n if 2 == len(oper.arguments):\n return True\n else:\n return False", "def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2", "def gate_operation(self, operation, qubit_expr, params=None):\n operation = QuantumCircuit._parse_gate_operation(operation)\n if params is not None:\n operation = operation(**params) \n qubit = self.qr.get(qubit_expr) \n return operation(qubit)", "def __call__(fun_name):", "def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper", "def fun(op, v1, v2):\n if op == '+':\n return v1+v2\n elif op == '-':\n return v1-v2\n elif op == '*':\n return v1*v2\n elif op == '/':\n return v1", "def do_math(operator, op1, op2):\n if operator == \"*\":\n return op1 * op2\n if operator == \"/\":\n return op1 / op2\n if operator == \"+\":\n return op1 + op2\n if operator == \"-\":\n return op1 - op2\n if operator == \"^\":\n return op1**(op2)", "def 
__call__(self, *args, **kw):\n return self.callable(*args, **kw)", "def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc", "def cg_inline_unary(self, fn):\n if fn == 'neg':\n op = '-'\n elif fn == 'not':\n op = '!'\n else:\n raise ValueError(f\"Unknown unary operator: {fn}\")\n self.asm(unindent(f\"\"\"\n @SP\n AM=M-1 // SP--\n D={op}M // D = MEM[SP]\n {self._cg_push_D}\n \"\"\"))", "def _support_op(*args):\n def inner(func):\n for one_arg in args:\n _op_mapping_[one_arg] = func\n return func\n\n return inner", "def RunOperator(op_def, verbose=False):\n if isinstance(op_def, pb.OperatorDef):\n op_def = op_def.SerializeToString()\n _C.RunOperator(op_def, verbose)", "def math(oper):\n a=int(request.args.get('a'))\n b=int(request.args.get('b'))\n result = math_oper[oper](a,b)\n return str(result)", "def callable(space, w_object):\n return space.callable(w_object)", "def run_operator(scope_node, node, name, op, code, f_globals):\n operators = __get_operators()\n if op not in operators:\n raise TypeError(\"failed to load operator '%s'\" % op)\n scope_key = scope_node.scope_key\n pair = operators[op](code, scope_key, f_globals)\n if isinstance(name, tuple):\n # The template inst binding with a single name will take this\n # path by using a length-1 name tuple. See bug #78.\n bind_extended_member(node, name, pair, scope_key)\n else:\n item = getattr(node.klass, name, None)\n if isinstance(item, Alias):\n bind_aliased_member(node, name, item, pair, scope_key)\n else:\n # This is the path for a standard binding on a child def.\n # It does not need the closure scope key. See bug #78.\n bind_member(node, name, pair)", "def __call__(self, *args):\n assert is_symbol(self.op) and not self.args\n return Expr(self.op, *args)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def binary(op, l, r):\n if op == \"+\": return l + r\n if op == \"*\": return l * r\n if op == \"-\": return l - r\n if op == \"=\": return l == r\n if op == \"<>\": return l != r\n if op == \"!=\": return l != r\n if op == \"or\": return l or r\n if op == \"<\": return l < r\n if op == \">\": return l > r\n if op == \"/\": return l / r\n if op == \"and\": return bool(l and r)\n if op == \"in\": return l in r\n if op == \"==\": return l == r\n if op == \"<=\": return l <= r\n if op == \">=\": return l >= r\n raise Exception(\"binary op not implemented\")", "def on_apply(self, node):\n if node.inputs[0].is_constant(Primitive):\n fn = node.inputs[0].value\n conv = MAP.get(fn)\n if conv is not None:\n return conv(self, *node.inputs[1:])\n return relay.Call(self.ref(node.inputs[0]),\n [self.ref(i) for i in node.inputs[1:]])", "def __call__(self, func: Callable) -> Callable:\n NAME_TO_SYMBOL[self._exported_name] = Symbol.from_callable(\n self._exported_name, func)\n return func", "def expand_callable(self, call_expr):\n call_expr.func = ast.Attribute(value=call_expr.func, attr='__call__')", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def op(\n self,\n opstring: str,\n precedence: int = 0,\n is_comparison: bool = False,\n return_type: Optional[\n 
Union[Type[TypeEngine[Any]], TypeEngine[Any]]\n ] = None,\n python_impl: Optional[Callable[..., Any]] = None,\n ) -> Callable[[Any], Operators]:\n operator = custom_op(\n opstring,\n precedence,\n is_comparison,\n return_type,\n python_impl=python_impl,\n )\n\n def against(other: Any) -> Operators:\n return operator(self, other) # type: ignore\n\n return against", "def convert_binary_logical_op(g, op, block):\n\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n op_func = get_relay_op(op.type)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def convert_unary_op(g, op, block):\n\n # op_map stores mapping relationship between paddlepaddle and relay\n op_map = {\"isinf_v2\": _op.isinf, \"isfinite_v2\": _op.isfinite, \"isnan_v2\": _op.isnan}\n if op.type in op_map:\n unary_func = op_map[op.type]\n else:\n # while paddle operator's name is same with relay\n unary_func = get_relay_op(op.type)\n out = unary_func(g.get_node(op.input(\"X\")[0]))\n g.add_node(op.output(\"Out\")[0], out)", "def op(self):\n return self.__op", "def op(self):\n return self.__op", "def to_operator(operator):\n if isinstance(operator, str):\n return ValueConstraintOperators.STRING_OPERATOR_MAP[operator]\n else:\n return operator", "def _arithmetize1(self, operand: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n # Data length might be changed after evaluation\n # operand = recycle_value(operand, self.data.shape[0])\n return op_func(operand)", "def convert(self, operator: OperatorBase) -> OperatorBase:\n # pylint: disable=cyclic-import,import-outside-toplevel\n from ..evolutions.evolved_op import EvolvedOp\n\n if isinstance(operator, ListOp):\n if isinstance(operator, SummedOp) and all([isinstance(op, PauliOp)\n for op in operator.oplist]):\n # For now, we only support graphs over Paulis.\n return self.group_subops(operator)\n elif self._traverse:\n return operator.traverse(self.convert)\n else:\n return operator\n elif isinstance(operator, OperatorStateFn) and self._traverse:\n return OperatorStateFn(self.convert(operator.primitive),\n is_measurement=operator.is_measurement,\n coeff=operator.coeff)\n elif isinstance(operator, EvolvedOp) and self._traverse:\n return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff)\n else:\n return operator", "def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n return result\n self.raise_error()", "def to_instruction(self):\n return self.to_circuit().to_gate()", "def special_math_func(state, other, operator):\n if not hasattr(other, '__iter__'):\n # other is just a number\n results = [getattr(state[each], operator)(other)\n for each in state.keys()]\n else:\n try:\n # Both are dictionaries\n results = [getattr(state[each], operator)(other[each])\n for each in state]\n except IndexError:\n # Both are iterables, but other is not a dictionary\n results = [getattr(state[i], operator)(j)\n for i, j in zip(state, other)]\n out = State(zip(state.keys(), results))\n return out", "def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, 
utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)", "def call_lua_op(op_name, *args):\n\n # convert the python objects to lua\n args_table = util.pack_args(args)\n try:\n func = executor.eval_lua(q_consts.lua_op_fn_str)\n result = func(op_name, args_table)\n except Exception as e:\n # TODO: Handle operator level failures properly\n print(str(e))\n result = None\n if result:\n # wrap the lua objects to python\n result = util.wrap_output(op_name, result)\n return result", "def __call__(a, b):", "def calculate(numbers, operator):\n \n if operator == 'add':\n return add(prepare_numbers(numbers))\n elif operator == 'subtract':\n return subtract(prepare_numbers(numbers))\n elif operator == 'multiply':\n return multiply(prepare_numbers(numbers))\n elif operator == 'divide':\n return divide(prepare_numbers(numbers))\n elif operator == 'remainder':\n return remainder(prepare_numbers(numbers))\n elif operator == 'power':\n return power(prepare_numbers(numbers))", "def trans_op_op(self, data):\n\n return self.trans_op(self.op(data))", "def _remove_operator(self, operator):", "def getCallable():", "def operatorCommand(self, buttonText):\n def applyOperator():\n number = self.digits[\"text\"]\n if number == 'Error':\n return\n if \".\" in number:\n number = float(number)\n else:\n number = int(number)\n self.calculator.applyOperator(buttonText, number)\n self.digits[\"text\"] = str(self.calculator)\n self.operatorEntered = True\n return applyOperator", "def rhs_as_python_func(self, namespace=None):\n namespace = namespace or {}\n\n return eval(\"lambda %s: %s\" % (','.join(self.rhs_names), self.rhs),\n str_to_npfunc_map, namespace)\n # math_namespace.namespace, namespace)", "def eval_ops(opcodes):\n output = []\n for op in opcodes:\n if op in [\"+\", \"*\"]:\n b = output.pop(-1)\n a = output.pop(-1)\n value = ops[op](a, b)\n output.append(value)\n else:\n output.append(op)\n\n assert len(output) == 1\n return output[0]", "def execute_operation(self, frac1, operator, frac2):\n try:\n ops = {\"+\": (lambda x, y: Frac(x) + Frac(y)), \"-\": (lambda x, y: Frac(x) - Frac(y)), \"*\":\n (lambda x, y: Frac(x) * Frac(y)), \"/\": (lambda x, y: Frac(x) / Frac(y))}\n if operator in ops:\n return str(ops[operator](frac1, frac2))\n else:\n print(self.operator_not_Valid)\n return False\n except Exception as e:\n print(self.operator_not_Valid, e)\n return False", "def evaluator(operator: str, value1: str, value2: str) -> str:\n\n evaluation_function: str = value1 + operator + value2\n #Because all three are strings, the + operator simply appends them together to be simplified. 
\n\n result: str = str(simplify(evaluation_function))\n return result", "def static_call(self, *args):\n return self.expression", "def operate(\n self, op: OperatorType, *other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def __rrshift__(self, other):\n if isinstance(other, Callable):\n return self @ other\n else:\n return self(other) # Function application", "def evaluate(expression):\n if isinstance(expression, int):\n return expression\n elif isinstance(expression, str): # operator\n try:\n return operators[expression]\n except KeyError:\n raise InvalidOperator(expression)\n else:\n exps = [evaluate(exp) for exp in expression]\n if len(exps) == 0:\n raise NullExpression()\n operator = exps.pop(0)\n if callable(operator):\n if len(exps) == 2:\n arg1, arg2 = exps\n return operator(arg1, arg2)\n elif len(exps) < 2:\n raise MissingArguments()\n else:\n raise TooManyArguments()\n else:\n raise InvalidOperator(operator)", "def evaluate(node,operators):\n\tif isinstance(node, ast.Num):\n\t\treturn node.n\n\telif isinstance(node, ast.BinOp):\n\t\treturn operators[type(node.op)](evaluate(node.left,operators), evaluate(node.right,operators))\n\telif isinstance(node, ast.UnaryOp):\n\t\treturn operators[type(node.op)](evaluate(node.operand,operators))\n\telse:\n\t\traise TypeError(node)", "def operator(app):\n return car(app)", "def get_binary_op_str(bin_op_node):\n\n if isinstance(bin_op_node, ast.Add):\n return \"+\"\n\n elif isinstance(bin_op_node, ast.Sub):\n return \"-\"\n\n elif isinstance(bin_op_node, ast.Mult):\n return \"*\"\n\n elif isinstance(bin_op_node, ast.Div):\n return \"/\"\n\n elif isinstance(bin_op_node, ast.Mod):\n return \"%\"\n\n elif isinstance(bin_op_node, ast.Pow):\n return \"**\"\n\n elif isinstance(bin_op_node, ast.LShift):\n return \"<<\"\n\n elif isinstance(bin_op_node, ast.RShift):\n return \">>\"\n\n else:\n raise ValueError(\"No string defined for binary operator node %s\" % \\\n bin_op_node.__class__.__name__)", "def _op_easy(self, op, reg_list, param_list=None): # pylint: disable-msg=invalid-name\n\n has_op = hasattr(self.circuit, op)\n\n if has_op:\n if param_list:\n # DEBUG\n # print(\"********** op {} param_list {} reg_list {}\".format(op, param_list, reg_list)) # pylint: disable-msg=line-too-long\n # END-DEBUG\n getattr(self.circuit, op)(*param_list, *reg_list)\n else:\n getattr(self.circuit, op)(*reg_list)\n\n return has_op", "def convert_broadcast_power(node, **kwargs):\n return create_basic_op_node('Pow', node, kwargs)", "def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator", "def binary_operator_string(self, binary):\n return binary.operator == '%' and 'mod' or binary.operator", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def call(self, x, mask=None):", "def 
_call_op_sugar(self, op_idx, *args):\n if not all(isinstance(a, six.integer_types) for a in args):\n raise TypeError('All args passed to call_op must be integers '\n '(LoomResult ids.) Did you forget to call constant?')\n result = self._weaver.CallOp(op_idx, args)\n if not result:\n raise AssertionError('Weaver op call failed: %s' %\n self._weaver.error_string())\n if len(result) == 1:\n return result[0]\n return result", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def _apply_binary_op_elementwise(\n self: ConcreteStructuredMetricValue, other: ConcreteStructuredMetricValue,\n op: Callable[[float, float], float]) -> ConcreteStructuredMetricValue:\n ...", "def exec_builtin(self, cmd):\r\n func = Builtin.builtins.get(cmd[0])\r\n if func is None:\r\n return False\r\n func(self, cmd)\r\n return True", "def cg_inline_arith(self, fn):\n if fn == 'add':\n op = '+'\n elif fn == 'sub':\n op = '-'\n elif fn == 'and':\n op = '&'\n elif fn == 'or':\n op = '|'\n else:\n raise ValueError(f\"Unknown arithmetic function: {fn}\")\n self.asm(unindent(f\"\"\"\n {self._cg_pop_D}\n @SP\n AM=M-1 // SP--\n M=M{op}D // D = MEM[SP] {op} D\n @SP // SP++\n AM=M+1\n \"\"\"))", "def _apply_binary_op_broadcast(\n self: ConcreteStructuredMetricValue, other: float,\n op: Callable[[float, float], float]) -> ConcreteStructuredMetricValue:\n ...", "def RunOperators(ops_def):\n RunOperatorsCC([_stringify_proto(op_def) for op_def in ops_def])", "def operator(self):\n return self.__operator", "def callable_(arg: str) -> str:\n return '! %r !' % arg", "def _UnaryOp(self, t):\n self.write(\"(\")\n self.write(self.unop[t.op.__class__.__name__])\n self.dispatch(t.operand)\n self.write(\")\")", "def visit_UnaryOp(self, node):\n self.generic_visit(node)\n if isinstance(node.operand, ast.Num):\n # Don't transform negations of numeric literals. Just treat them\n # as literals.\n return node\n return to_call(self.op_to_function(node.op), [node.operand])", "def calculate(A, B, operator): # HELPER\n base_dict = {\n '+' : float(A) + float(B),\n '-' : float(A) - float(B),\n '*' : float(A) * float(B),\n '/' : float(A) / float(B), \n '**': float(A) **float(B), \n '<<': float(A) * (2**float(B)), # left shift\n '>>': float(A) / (2**float(B)) # right shift\n }\n return base_dict[operator]", "def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node", "def reverse_operate(\n self, op: OperatorType, other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))" ]
[ "0.69218695", "0.67679876", "0.66050005", "0.6560225", "0.6488998", "0.63463247", "0.6271635", "0.62432164", "0.62399954", "0.61941004", "0.61931217", "0.6169983", "0.6155207", "0.6142081", "0.6140598", "0.6131111", "0.61122173", "0.61021495", "0.60998577", "0.6044439", "0.60384125", "0.60383713", "0.602016", "0.60146534", "0.600851", "0.60041237", "0.59838444", "0.59325176", "0.5907455", "0.5895891", "0.58673525", "0.5851982", "0.5849257", "0.58440155", "0.5831971", "0.58039755", "0.5803796", "0.58024484", "0.57756865", "0.5774621", "0.576195", "0.5752215", "0.57369655", "0.5723992", "0.5679997", "0.56787354", "0.56470823", "0.5646945", "0.56322694", "0.56316584", "0.56144774", "0.56077474", "0.56060773", "0.55982506", "0.55982506", "0.5578665", "0.55693763", "0.5561392", "0.5556769", "0.5540374", "0.55338734", "0.55178326", "0.55166775", "0.5515114", "0.55147356", "0.55113447", "0.5511069", "0.5509939", "0.55059236", "0.5498835", "0.54904914", "0.54903257", "0.54866064", "0.54831666", "0.5481427", "0.54797935", "0.547457", "0.54708827", "0.546757", "0.5462319", "0.54608345", "0.5453648", "0.5450068", "0.54433286", "0.54417217", "0.54410017", "0.54311377", "0.54245377", "0.5415455", "0.54137886", "0.54127866", "0.54099977", "0.54077494", "0.5406808", "0.5391187", "0.53890616", "0.5387787", "0.53832954", "0.5382106", "0.536104" ]
0.6633928
2
Convert comparison operator to function call.
def visit_Compare(self, node):
    self.generic_visit(node)
    if len(node.ops) > 1:
        raise NotImplementedError("Multiple comparisons not supported")
    op, comparator = node.ops[0], node.comparators[0]
    if isinstance(op, ast.In):
        # Special case: `contains` reverses the operands.
        return to_call(to_attribute(self.operator, 'contains'),
                       [comparator, node.left])
    elif isinstance(op, ast.NotIn):
        # Special case: there is no `not_contains`.
        return to_call(to_attribute(self.operator, 'not_'), [
            to_call(to_attribute(self.operator, 'contains'),
                    [comparator, node.left])
        ])
    else:
        # General case
        return to_call(self.op_to_function(op), [node.left, comparator])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare(self, operator, value, **kw):\n\n return operator(self.comparator, value)", "def comparison(op):\n def comp(*args):\n if args:\n item = args[0]\n for o in args[1:]:\n if op(item, o):\n item = o\n else:\n return Boolean(False)\n return Boolean(True)\n else:\n return Boolean(True)\n return comp", "def dynamic_comparison(v1, op, v2):\n assert op in ['gt', 'lt']\n\n operator_map = {'gt': operator.gt,\n 'lt': operator.lt}\n\n return operator_map[op](v1, v2)", "def _comparison_function(comp, value=0.0, **kwargs):\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '{}'.\".format(comp))\n\n def comp_func(xx):\n return func(xx, value, **kwargs)\n\n return comp_func", "def _less_than_or_equal_to_op(spec):", "def operator(self, sort):\r\n return None", "def _less_than_op(spec):", "def _comparisonFunction(comp):\n # ---- DECPRECATION SECTION ----\n warnStr = (\"Using deprecated function '_comparisonFunction'. \"\n \"Use '_comparison_function' instead.\")\n warnings.warn(warnStr, DeprecationWarning, stacklevel=3)\n # ------------------------------\n\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '%s'.\" % (comp))\n\n return func", "def less_than_or_equal(self) -> global___Expression:", "def ops(rule):\n ops_dict = {'>' : operator.gt,\n '<' : operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=' : operator.eq,\n '==' : operator.eq}\n return ops_dict[rule]", "def compare(self, *args):\n return _ida_hexrays.operand_locator_t_compare(self, *args)", "def cg_inline_cmp(self, fn):\n if fn == 'lt':\n jmp = 'JLT'\n elif fn == 'eq':\n jmp = 'JEQ'\n elif fn == 'gt':\n jmp = 'JGT'\n else:\n raise ValueError(f\"Unknown cmp function: {fn}\")\n true_branch = self.uniqueLabel() + '-cmp-' + fn\n done = self.uniqueLabel() + '-cmp-done'\n self.asm(unindent(f\"\"\"\n {self._cg_pop_D}\n @SP\n AM=M-1 // SP--\n D=M-D\n @{true_branch}\n D;{jmp} // Comparing x {fn} y, so x-y;{jmp}\n @SP\n AM=M\n M=0 // !{fn}: MEM[SP] = 0 (false)\n @{done}\n 0;JMP\n ({true_branch})\n @SP\n AM=M\n M=-1 // {fn}: MEM[SP] = -1 (true)\n ({done})\n @SP\n AM=M+1 // SP++\n \"\"\"))", "def _greater_than_or_equal_to_op(spec):", "def less_than(self) -> global___Expression:", "def execute_binary_operator(cls, val, x, y):\n\n\t\tif val == 0:\n\t\t\treturn operator.add(x,y)\n\t\telif val == 1:\n\t\t\treturn operator.sub(x,y)\n\t\telif val == 2:\n\t\t\treturn operator.mul(x,y)\n\t\telif val == 3:\n\t\t\treturn operator.div(x,y)\n\t\telif val == 4:\n\t\t\treturn operator.lt(x,y)\n\t\telif val == 5:\n\t\t\treturn operator.gt(x,y)\n\t\telif val == 6:\n\t\t\treturn operator.le(x,y)\n\t\telif val == 7:\n\t\t\treturn operator.ge(x,y)\n\t\telif val == 8:\n\t\t\treturn operator.eq(x,y)\n\t\telif val == 9:\n\t\t\treturn operator.ne(x,y)\n\t\telif val == 12:\n\t\t\treturn operator.mod(x,y)", "def __lt__(self, other: Any) -> 
ColumnOperators:\n return self.operate(lt, other)", "def _equal_to_op(spec):", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def __le__(self, other: Any) -> ColumnOperators:\n return self.operate(le, other)", "def _builtin_le(arg1, arg2, engine=None, **k):\n check_mode((arg1, arg2), ['gg'], functor='=<', **k)\n a_value = arg1.compute_value(engine.functions)\n b_value = arg2.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value <= b_value", "def _builtin_lt(arg1, arg2, engine=None, **kwdargs):\n check_mode((arg1, arg2), ['gg'], functor='<', **kwdargs)\n a_value = arg1.compute_value(engine.functions)\n b_value = arg2.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value < b_value", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def __gt__(self, *args):\n return _ida_hexrays.operand_locator_t___gt__(self, *args)", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def __ge__(self, other: Any) -> ColumnOperators:\n return self.operate(ge, other)", "def test03_comparison_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n assert (number(20) > number(10)) == True\n assert (number(20) < number(10)) == False\n assert (number(20) >= number(20)) == True\n assert (number(20) <= number(10)) == False\n assert (number(20) != number(10)) == True\n assert (number(20) == number(10)) == False", "def greater_than_or_equal(self) -> global___Expression:", "def operator(self):\n return self.__operator", "def __lt__(self, *args):\n return _ida_hexrays.operand_locator_t___lt__(self, *args)", "def _cmp_dispatcher(other_method_name):\n\n def dispatched_cmp(self, other):\n try:\n other_method = getattr(other, other_method_name)\n except AttributeError:\n return False\n return other_method(self)\n return dispatched_cmp", "def _get_comparison_func(self, adjective):\n return self.SONG_ADJECTIVES.get(adjective, {}).get(\"comparison\")", "def compare(self, *args):\n return _ida_hexrays.cexpr_t_compare(self, *args)", "def comparator(self) -> typing.Callable[[Vec, Vec, Term], bool]:\n pass", "def _greater_than_op(spec):", "def bert_binop(vec1, vec2, operator):\n assert (len(vec1)==len(vec2)==1024), \"Vectors must both have length of 1024\"\n if (operator=='+'):\n return np.add(vec1, vec2)\n elif (operator=='-'):\n return np.subtract(vec1, vec2)\n elif (operator=='?'):\n return euclidean(vec1, vec2)\n elif (operator=='=='):\n return (vec1 == vec2)\n else:\n raise ValueError(f\"Invalid operator '{operator}'\")", "def bert_binop(vec1, vec2, operator):\n assert (len(vec1)==len(vec2)==1024), \"Vectors must both have length of 1024\"\n if (operator=='+'):\n return np.add(vec1, vec2)\n elif (operator=='-'):\n return np.subtract(vec1, vec2)\n elif (operator=='?'):\n return euclidean(vec1, vec2)\n elif (operator=='=='):\n return (vec1 == vec2)\n else:\n raise ValueError(f\"Invalid operator '{operator}'\")", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def is_comparison_op(self):\r\n return self.value in [\"=\", \"!=\", 
\"<\", \"<=\", \">\", \">=\"]", "def _builtin_ge(arg1, arg2, engine=None, **k):\n check_mode((arg1, arg2), ['gg'], functor='>=', **k)\n a_value = arg1.compute_value(engine.functions)\n b_value = arg2.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value >= b_value", "def bool_op(\n self,\n opstring: str,\n precedence: int = 0,\n python_impl: Optional[Callable[..., Any]] = None,\n ) -> Callable[[Any], Operators]:\n return self.op(\n opstring,\n precedence=precedence,\n is_comparison=True,\n python_impl=python_impl,\n )", "def all_compare_operators(request: Any) -> Any:\n return request.param", "def all_compare_operators(request: Any) -> Any:\n return request.param", "def _generate_cmp_invocation(self, op, lineno, offset):\n to_insert = []\n start_offset = offset\n const_atheris = self._get_const(sys.modules[TARGET_MODULE])\n name_cmp = self._get_name(COMPARE_FUNCTION)\n const_op = self._get_const(op)\n const_pc = self._get_pc()\n const_False = self._get_const(False)\n\n to_insert.append(\n Instruction(lineno, offset, dis.opmap[\"LOAD_CONST\"], const_atheris))\n offset += to_insert[-1].get_size()\n to_insert.append(\n Instruction(lineno, offset, dis.opmap[\"LOAD_ATTR\"], name_cmp))\n offset += to_insert[-1].get_size()\n to_insert.append(Instruction(lineno, offset, dis.opmap[\"ROT_THREE\"]))\n offset += to_insert[-1].get_size()\n to_insert.append(\n Instruction(lineno, offset, dis.opmap[\"LOAD_CONST\"], const_op))\n offset += to_insert[-1].get_size()\n to_insert.append(\n Instruction(lineno, offset, dis.opmap[\"LOAD_CONST\"], const_pc))\n offset += to_insert[-1].get_size()\n to_insert.append(\n Instruction(lineno, offset, dis.opmap[\"LOAD_CONST\"], const_False))\n offset += to_insert[-1].get_size()\n to_insert.append(Instruction(lineno, offset, dis.opmap[\"CALL_FUNCTION\"], 5))\n offset += to_insert[-1].get_size()\n\n return offset - start_offset, to_insert", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def __gt__(self, other: Any) -> ColumnOperators:\n return self.operate(gt, other)", "def compare(self, *args):\n return _ida_hexrays.carglist_t_compare(self, *args)", "def _builtin_gt(arg1, arg2, engine=None, **kwdargs):\n check_mode((arg1, arg2), ['gg'], functor='>', **kwdargs)\n a_value = arg1.compute_value(engine.functions)\n b_value = arg2.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value > b_value", "def _default_eval_func(a, b):\n emphasis = \"r2\"\n a_value = getattr(a, emphasis)\n b_value = getattr(b, emphasis)\n return a_value > b_value", "def _binaryop(self, other, op: str):\n raise NotImplementedError", "def op(\n self,\n opstring: str,\n precedence: int = 0,\n is_comparison: bool = False,\n return_type: Optional[\n Union[Type[TypeEngine[Any]], TypeEngine[Any]]\n ] = None,\n python_impl: Optional[Callable[..., Any]] = None,\n ) -> Callable[[Any], Operators]:\n operator = custom_op(\n opstring,\n precedence,\n is_comparison,\n return_type,\n python_impl=python_impl,\n )\n\n def against(other: Any) -> Operators:\n return operator(self, other) # type: ignore\n\n return against", "def _operators_conductor(operator_name, _bool=None):\n func = getattr(Series, operator_name)\n if _bool is None:\n # return bool series.\n _pre, _post = bool, bool\n else:\n # return ints.\n _pre, _post = int, int\n\n @wraps(func)\n def operator_method(self, other=None):\n if other is None:\n # 
for unary such as pos, neg, invert\n def not_(df: dF):\n return func(df.pipe(self.copy().pop())).apply(_post)\n\n return not_\n\n # if not isinstance(other, Condition):\n # raise TypeError(\"only conditions can add, got %r\" % type(other))\n\n def comb(df: dF) -> Series:\n return func(df.pipe(self).apply(_pre), df.pipe(other).apply(_pre)).apply(_post)\n\n return comb\n\n return operator_method", "def all_compare_operators(request):\n return request.param", "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def str_to_operator(s):\n return {\n # https://docs.python.org/3/library/operator.html#mapping-operators-to-functions\n \"<\": operator.lt,\n \"<=\": operator.le,\n \"==\": operator.eq,\n \"!=\": operator.ne,\n \">=\": operator.ge,\n \">\": operator.gt,\n }[s]", "def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def comparator(self) -> Operator:\n return self.__comparator", "def cmp_to_key(cmp_fun, model):\n class K:\n def __init__(self, obj, *args):\n self.obj = obj\n def __lt__(self, other):\n return cmp_fun(self.obj, other.obj, model) < 0\n def __gt__(self, other):\n return cmp_fun(self.obj, other.obj, model) > 0\n def __eq__(self, other):\n return cmp_fun(self.obj, other.obj, model) == 0\n def __le__(self, other):\n return cmp_fun(self.obj, other.obj, model) <= 0\n def __ge__(self, other):\n return cmp_fun(self.obj, other.obj, model) >= 0\n def __ne__(self, other):\n return cmp_fun(self.obj, other.obj, model) != 0\n return K", "def transformCompare(*args, root: bool=True, **kwargs)->int:\n pass", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. 
Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def _cmp(a, b): # pylint: disable=invalid-name\n return (a > b) - (a < b)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def to_condition(operator: str, value: Any) -> CellCondition:\n operator = str(operator).lower().strip()\n condition = {\n \">\": lambda x: x is not None and x > value,\n \"<\": lambda x: x is not None and x < value,\n \">=\": lambda x: x is not None and x >= value,\n \"<=\": lambda x: x is not None and x <= value,\n \"==\": lambda x: x == value,\n \"!=\": lambda x: x != value,\n \"is\": lambda x: x is value,\n \"not is\": lambda x: x is not value,\n \"contains\": lambda x: x is not None and value in x,\n \"not contains\": lambda x: x is not None and value not in x,\n \"in\": lambda x: x in value,\n \"not in\": lambda x: x not in value,\n }.get(operator)\n\n if not condition:\n raise ValueError(f\"Unknown operator: {operator}\")\n\n return condition", "def greater_than(self) -> global___Expression:", "def vm_cmp(vm_state: VmState, *args, op_bytecode=None, **kwargs) -> VmState:\n op_code, arg1_type, arg1, arg2_type, arg2 = op_bytecode\n\n assert VM_OPERATION_TO_BYTECODE[op_code] == \"CMP\"\n\n if arg2_type == 2: # Register\n right_value = vm_state.vm_registers[arg2].value\n\n elif arg2_type == 3: # Register pointer\n input_value_addr = vm_state.vm_registers[arg2].value\n right_value = vm_state.vm_memory[input_value_addr]\n\n elif arg2_type == 4: # In-place value\n right_value = arg2\n\n else:\n raise Exception(f\"Bad argument for CMP\")\n\n if arg1_type == 2: # Register\n left_value = vm_state.vm_registers[arg1].value\n\n elif arg1_type == 3: # RegisterPointer\n mem_index = vm_state.vm_registers[arg1].value\n left_value = vm_state.vm_memory[mem_index]\n\n elif arg1_type == 4: # In-place value\n left_value = arg1\n\n else:\n raise Exception(f\"Bad argument on CMP\")\n\n if left_value > right_value:\n vm_state.vm_registers[7].value = True\n vm_state.vm_registers[8].value = True\n elif left_value < right_value:\n vm_state.vm_registers[6].value = True\n vm_state.vm_registers[8].value = True\n elif left_value == right_value:\n vm_state.vm_registers[5].value = True\n vm_state.vm_registers[6].value = False\n vm_state.vm_registers[7].value = False\n vm_state.vm_registers[8].value = False\n\n return vm_state", "def operator(self) -> str:\n return self._operator", "def __gt__(self, *args):\n return _ida_hexrays.cexpr_t___gt__(self, *args)", "def getop(op):\n # y is search argument, x is the record's value\n ops = {\n \"==\": lambda y,x: x == y,\n \"!=\": lambda y,x: x != y,\n \">\": lambda y,x: x 
> y,\n \"<\": lambda y,x: x < y,\n \">=\": lambda y,x: x >= y,\n \"<=\": lambda y,x: x <= y,\n 'any': lambda y,x: x != None,\n 'noop': lambda y,x: True,\n 'starts': lambda y,x: unicode(y).lower() in unicode(x).lower(),\n }\n return ops[SYNONYMS.get(op, op)]", "def compare(*args):\n return _ida_hexrays.compare(*args)", "def _default_eval_func(a, b):\n emphasis = \"accuracy\"\n a_value = getattr(a, emphasis)\n b_value = getattr(b, emphasis)\n return a_value > b_value", "def as_function(relation, opposite=False):\n\n greater_than = lambda x, y: x > y\n less_than = lambda x, y: x < y\n equal = lambda x, y: x == y\n not_equal = lambda x, y: not equal(x, y)\n if relation == Relation.greater_than:\n if not opposite:\n return greater_than\n else:\n return less_than\n elif relation == Relation.less_than:\n if not opposite:\n return less_than\n else:\n return greater_than\n elif relation == Relation.equal:\n if not opposite:\n return equal\n else:\n return not_equal\n elif relation == Relation.not_equal:\n if not opposite:\n return not_equal\n else:\n return equal\n else:\n raise ValueError('\"' + relation + '\" is not a known relation.')", "def compare(a, b):\n if a > b:\n return a\n return b", "def __eq__(self, other):\n if self.op is None:\n if all(x.isdigit() for x in self.version):\n return self.relative_eq(other)\n elif 'x' in self.version:\n return self.compare_wild_card(other)\n elif self.version.startswith('*'):\n return True\n elif '~' == self.op:\n return self.compare_approximate(other)\n elif '<=' == self.op:\n return self.relative_lte(other)\n elif '<' == self.op:\n return self.relative_lt(other)\n elif '>=' == self.op:\n return self.relative_gte(other)\n elif '>' == self.op:\n return self.relative_gt(other)\n elif '^' == self.op:\n return self.compare_compatible(other)\n raise Exception(\n \"invalid comparison between {0} {1}\".format(self, other)\n )", "def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def compare(self, *args):\n return _ida_hexrays.casm_t_compare(self, *args)", "def is_binary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol(arg)\n # globally\n # ret-type operator symbol( arg1, arg2 )\n symbols = [\n ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',\n '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',\n '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 1 == len(oper.arguments):\n 
return True\n else:\n return False\n else:\n if 2 == len(oper.arguments):\n return True\n else:\n return False", "def isOperator(self, *args):\n return _libsbml.ASTBasePlugin_isOperator(self, *args)", "def predicate (self, qx) :\n lhs = qx (self.lhs)\n op = self.op\n name = op.__name__\n op = _Op_Map_.get (name, op)\n return lhs._op_call (name, op, * self.args, ** self.kw)", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def __ge__(self, *args):\n return _ida_hexrays.operand_locator_t___ge__(self, *args)", "def _override_operator(class_object, operator, func):\n existing = getattr(class_object, operator, None)\n if existing is not None:\n # Check to see if this is a default method-wrapper or slot wrapper which\n # will be true for the comparison operators.\n if not isinstance(existing, type(object.__lt__)) and not isinstance(existing, type(object.__repr__)):\n raise ValueError(\"operator %s cannot be overwritten again on class %s.\" %(operator, class_object))\n setattr(class_object, operator, func)", "def cmp(a, b):\n return (a > b) - (a < b)", "def test_operator_rendering(self):\r\n self.assertEqual(\"=\", unicode(EqualsOperator()))\r\n self.assertEqual(\"IN\", unicode(InOperator()))\r\n self.assertEqual(\">\", unicode(GreaterThanOperator()))\r\n self.assertEqual(\">=\", unicode(GreaterThanOrEqualOperator()))\r\n self.assertEqual(\"<\", unicode(LessThanOperator()))\r\n self.assertEqual(\"<=\", unicode(LessThanOrEqualOperator()))", "def compare(self, *args):\n return _ida_hexrays.cinsn_t_compare(self, *args)", "def compare(value1, operation, value2):\n if operation == None:\n return False\n \n operation = operation.upper()\n\n if operation in [\"=\", \"==\", \"EQ\"]:\n return ObjectComparator.are_equal(value1, value2)\n if operation in [\"!=\", \"<>\", \"NE\"]:\n return ObjectComparator.are_not_equal(value1, value2)\n if operation in [\"<\", \"LT\"]:\n return ObjectComparator.less(value1, value2)\n if operation in [\"<=\", \"LE\"]:\n return ObjectComparator.are_equal(value1, value2) or ObjectComparator.less(value1, value2)\n if operation in [\">\", \"GT\"]:\n return ObjectComparator.more(value1, value2)\n if operation in [\">=\", \"GE\"]:\n return ObjectComparator.are_equal(value1, value2) or ObjectComparator.more(value1, value2)\n if operation == \"LIKE\":\n return ObjectComparator.match(value1, value2)\n\n return True", "def compare(self, *args):\n return _ida_hexrays.fnumber_t_compare(self, *args)", "def test_operator_rendering(self):\n self.assertEqual(\"=\", six.text_type(EqualsOperator()))\n self.assertEqual(\"IN\", six.text_type(InOperator()))\n self.assertEqual(\">\", six.text_type(GreaterThanOperator()))\n self.assertEqual(\">=\", six.text_type(GreaterThanOrEqualOperator()))\n self.assertEqual(\"<\", six.text_type(LessThanOperator()))\n self.assertEqual(\"<=\", six.text_type(LessThanOrEqualOperator()))", "def compare(a, b):\n # Your function body should begin here.\n pass", "def operator(self):\n return self.data.get('operator', 'and')", "def operator(self):\n return self.data.get('operator', 'and')", "def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)", "def __lt__(self, *args):\n return _ida_hexrays.cexpr_t___lt__(self, *args)", "def where(self, value, operator=\">\"):\n assert operator in self.operators\n return 
f\"\"\"\nf_cast_isots(json->>'{sqlq(self.name)}') {sqlq(operator)} ${{arg}}::{sqlq(self.cast_type)}\"\"\"", "def compare(self, *args):\n return _ida_hexrays.cswitch_t_compare(self, *args)", "def evaluator(operator: str, value1: str, value2: str) -> str:\n\n evaluation_function: str = value1 + operator + value2\n #Because all three are strings, the + operator simply appends them together to be simplified. \n\n result: str = str(simplify(evaluation_function))\n return result", "def compare(self, *args):\n return _ida_hexrays.carg_t_compare(self, *args)", "def __gt__(self, *args):\n return _ida_hexrays.cswitch_t___gt__(self, *args)", "def compare(self, *args):\n return _ida_hexrays.cdo_t_compare(self, *args)", "def __eq__(self, *args):\n return _ida_hexrays.operand_locator_t___eq__(self, *args)" ]
[ "0.73119915", "0.6936276", "0.67142797", "0.6697332", "0.6670698", "0.63985837", "0.6394015", "0.6347988", "0.6329906", "0.62401164", "0.6220372", "0.6147607", "0.6115169", "0.6115023", "0.6092435", "0.6043819", "0.6031347", "0.60203665", "0.5976809", "0.5946226", "0.5938605", "0.5913892", "0.5902411", "0.5895635", "0.5887231", "0.58860797", "0.5859541", "0.58149374", "0.58067644", "0.5806359", "0.5803631", "0.57886004", "0.57789767", "0.5771746", "0.576385", "0.576385", "0.57636875", "0.5758011", "0.5749525", "0.57331425", "0.5723586", "0.5723586", "0.57069874", "0.56802917", "0.5668694", "0.5646487", "0.56464684", "0.56378144", "0.56321555", "0.5624152", "0.5619684", "0.5614111", "0.55982417", "0.55944526", "0.559349", "0.55916137", "0.5591308", "0.5585888", "0.55740833", "0.5573371", "0.55674446", "0.5558765", "0.55582577", "0.5558132", "0.55490935", "0.55443525", "0.55360216", "0.55213165", "0.55107206", "0.55088156", "0.5508194", "0.54998195", "0.54959476", "0.54937434", "0.54836845", "0.54692113", "0.5458481", "0.5456144", "0.5451226", "0.5450325", "0.54445314", "0.5444509", "0.5438317", "0.5435146", "0.5433655", "0.54306203", "0.54287684", "0.54286057", "0.54283637", "0.54158515", "0.54158515", "0.5415625", "0.5413461", "0.5404913", "0.5390423", "0.5390371", "0.5389199", "0.53867465", "0.5379103", "0.5371218" ]
0.6528138
5
Convert list literal to function call.
def visit_List(self, node):
    self.generic_visit(node)
    if isinstance(node.ctx, ast.Load):
        return to_call(to_attribute(self.operator, '__list__'), node.elts)
    return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l", "def list_sugar(self):\n return 'list(', ')'", "def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. \"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)", "def escape_list(mylist, escape_func):\n def escape(obj, escape_func=escape_func):\n try:\n e = obj.escape\n except AttributeError:\n return obj\n else:\n return e(escape_func)\n return list(map(escape, mylist))", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def convert_list(f, parameters):\n variables = f[0].arguments()\n varpar = list(parameters) + list(variables)\n F = symbolic_expression([i(*variables) for i in f]).function(*varpar)\n lis = flatten([fast_callable(i,vars=varpar).op_list() for i in F], max_level=1)\n deflist = []\n stack = []\n const =[]\n stackcomp=[]\n detail=[]\n for i in lis:\n if i[0] == 'load_arg':\n stack.append(varpar[i[1]])\n elif i[0] == 'ipow':\n if i[1] in NN:\n basis = stack[-1]\n for j in range(i[1]-1):\n\t a=stack.pop(-1)\n\t detail.append(('mul', a, basis))\n\t stack.append(a*basis)\n\t stackcomp.append(stack[-1])\n else:\n detail.append(('pow',stack[-1],i[1]))\n stack[-1]=stack[-1]**i[1]\n stackcomp.append(stack[-1])\n\n elif i[0] == 'load_const':\n const.append(i[1])\n stack.append(i[1])\n elif i == 'mul':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('mul', a, b))\n stack.append(a*b)\n stackcomp.append(stack[-1])\n\n elif i == 'div':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('div', a, b))\n stack.append(b/a)\n stackcomp.append(stack[-1])\n\n elif i == 'add':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('add',a,b))\n stack.append(a+b)\n stackcomp.append(stack[-1])\n\n elif i == 'pow':\n a=stack.pop(-1)\n b=stack.pop(-1)\n detail.append(('pow', b, a))\n stack.append(b**a)\n stackcomp.append(stack[-1])\n\n elif i[0] == 'py_call' and str(i[1])=='log':\n a=stack.pop(-1)\n detail.append(('log', a))\n stack.append(log(a))\n stackcomp.append(stack[-1])\n\n elif i[0] == 'py_call' and str(i[1])=='exp':\n a=stack.pop(-1)\n detail.append(('exp', a))\n stack.append(exp(a))\n stackcomp.append(stack[-1])\n\n elif i[0] == 'py_call' and str(i[1])=='sin':\n a=stack.pop(-1)\n detail.append(('sin', a))\n detail.append(('cos', a))\n stackcomp.append(sin(a))\n stackcomp.append(cos(a))\n stack.append(sin(a))\n\n elif i[0] == 'py_call' and str(i[1])=='cos':\n a=stack.pop(-1)\n detail.append(('sin', a))\n detail.append(('cos', a))\n stackcomp.append(sin(a))\n stackcomp.append(cos(a))\n stack.append(cos(a))\n\n elif i == 'neg':\n a = stack.pop(-1)\n 
detail.append(('mul', -1, a))\n stack.append(-a)\n stackcomp.append(-a)\n\n return stackcomp,detail", "def list(self, arg: SeField[Any]) -> str:\n if is_bare_list(arg.type):\n return arg.varname\n else:\n earg = arg[0]\n earg.name = \"v\"\n return f\"[{self.render(earg)} for v in {arg.varname}]\"", "def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args", "def target_list_option(s):\n return _convert(s, (list, tuple))", "def from_list(l):\n if isinstance(l, str):\n for special_char in (' ', '\\n', '\\t', '(', ')', '\\\"'):\n if special_char in l:\n return '\\\"' + l + '\\\"'\n return l\n return '(' + ' '.join(from_list(e) for e in l) + ')'", "def list_option(s):\n return _convert(s, (list, tuple))", "def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))", "def _builtin_split_call(term, parts, database=None, location=None, **kwdargs):\n functor = '=..'\n # modes:\n # <v> =.. list => list has to be fixed length and non-empty\n # IF its length > 1 then first element should be an atom\n # <n> =.. <list or var>\n #\n mode = check_mode((term, parts), ['vL', 'nv', 'nl'], functor=functor, **kwdargs)\n if mode == 0:\n elements, tail = list_elements(parts)\n if len(elements) == 0:\n raise CallModeError(functor, (term, parts),\n message='non-empty list for arg #2 if arg #1 is a variable',\n location=database.lineno(location))\n elif len(elements) > 1 and not _is_atom(elements[0]):\n raise CallModeError(functor, (term, parts),\n message='atom as first element in list if arg #1 is a variable',\n location=database.lineno(location))\n elif len(elements) == 1:\n # Special case => term == parts[0]\n return [(elements[0], parts)]\n else:\n term_part = elements[0](*elements[1:])\n return [(term_part, parts)]\n else:\n part_list = (term.with_args(),) + term.args\n current = Term('[]')\n for t in reversed(part_list):\n current = Term('.', t, current)\n try:\n local_values = {}\n list_part = unify_value(current, parts, local_values)\n elements, tail = list_elements(list_part)\n term_new = elements[0](*elements[1:])\n term_part = unify_value(term, term_new, local_values)\n return [(term_part, list_part)]\n except UnifyError:\n return []", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def maplist(f, xs):\n return list(map(f, xs))", "def decorator(arg):\n return lambda: list(arg)", "def my_evalf(expr, chop=False):\r\n if type(expr) == list:\r\n try:\r\n return [x.evalf(chop=chop) for x in expr]\r\n except:\r\n return expr\r\n try:\r\n return expr.evalf(chop=chop)\r\n except:\r\n return expr", "def __call__(self, X, Y=None, eval_gradient=False):\n return [f(X, Y=Y, eval_gradient=eval_gradient) for f in self.list_func]", "def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result", "def func_deserialize(self, args): # pragma: no cover\n if len(args) == 0:\n return []\n x = eval(args.decode(\"utf-8\"))\n return x", "def eval_f(f, xs):\n res_list = []\n for num in xs:\n #int_num = 
int(num)\n fun_num = f(num)\n res_list.append(fun_num)\n\n return res_list", "def mk_sql_list(ls):\n res = \"(\" + ' '.join([str(elem) for elem in intersperse(\",\", ls)]) + \")\"\n return res", "def f(*args):\n alist = [a() for a in args]\n print(alist)", "def test_expr_list_array_constructor():\n fcode = \"ACOS(-1.0), SIN(1.0), 1.0+3.0\"\n ast = Fortran2003.Ac_Spec(fcode)\n assert isinstance(ast, Fortran2003.Ac_Value_List)", "def evlis(targetlist, a_list, d_list):\n if targetlist.null():\n return SExp(\"NIL\")\n return SExp(eval_lisp(targetlist.car(), a_list, d_list),\n evlis(targetlist.cdr(), a_list, d_list))", "def cast_to_list(position):\n\n\[email protected]\n\tdef wrapper(function, instance, args, kwargs):\n\t\tif not isinstance(args[position], list):\n\t\t\targs = list(args)\n\t\t\targs[position] = [args[position]]\n\t\t\targs = tuple(args)\n\n\t\treturn function(*args, **kwargs)\n\n\treturn wrapper", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def label(l):\r\n def action(string, loc, tokens):\r\n newlist = [l]\r\n newlist.extend(tokens)\r\n return newlist\r\n return action", "def apply_lisp(function, args, a_list, d_list):\n #TODO: doing everything in one function, and handling all cases is\n # one place, is terrible design. this refactor should go along\n # with redoing how S-expressions are represented.\n\n if not function.atom():\n msg = \"error: cannot call non-atom {0} as a function\".format(function)\n raise error.LispException(msg)\n #TODO: integrate the check_args call with the other primitives\n #definitions + help.\n if function in CAR:\n check_args(function, args.length(), 1)\n # caar, because only have one argument: a list\n return args.car().car()\n if function in CDR:\n check_args(function, args.length(), 1)\n return args.car().cdr() # cadr\n if function in CONS:\n check_args(function, args.length(), 2)\n # two arguments. the second one is (s . 
nil) but we only want the s\n return SExp(args.car(), args.cdr().car())\n if function in ATOM:\n check_args(function, args.length(), 1)\n return args.car().atom(sexp=True)\n if function in NULL:\n check_args(function, args.length(), 1)\n return args.car().null(sexp=True)\n if function in EQ:\n check_args(function, args.length(), 2)\n return args.car().eq(args.cdr().car(), sexp=True)\n if function in INT:\n check_args(function, args.length(), 1)\n return args.car().int(sexp=True)\n if function in PLUS:\n check_args(function, args.length(), 2)\n return args.car().plus(args.cdr().car())\n if function in MINUS:\n check_args(function, args.length(), 2)\n return args.car().minus(args.cdr().car())\n if function in TIMES:\n check_args(function, args.length(), 2)\n return args.car().times(args.cdr().car())\n if function in QUOTIENT:\n check_args(function, args.length(), 2)\n return args.car().quotient(args.cdr().car())\n if function in REMAINDER:\n check_args(function, args.length(), 2)\n return args.car().remainder(args.cdr().car())\n if function in LESS:\n check_args(function, args.length(), 2)\n return args.car().less(args.cdr().car())\n if function in GREATER:\n check_args(function, args.length(), 2)\n return args.car().greater(args.cdr().car())\n if function in HELP:\n check_args(function, args.length(), 0)\n print help_string\n return SExp(\"T\")\n if function in QUIT:\n check_args(function, args.length(), 0)\n exit()\n if not in_pairlist(function, d_list):\n raise error.LispException(\"function {0} not found\".format(function))\n params = getval(function, d_list).car()\n check_args(function, args.length(), params.length())\n return eval_lisp(getval(function, d_list).cdr(),\n addpairs(params, args, a_list), d_list)", "def list_map(data, function):\n return list(map(function, data))", "def soft_list_eval(data):\n out = []\n for x in data:\n try:\n out.append(eval(x, {}))\n except:\n try:\n out.append(x.decode())\n except (AttributeError, SyntaxError):\n out.append(x)\n \n return out", "def zzX_eval_list(f, A):\n def rec_eval(g, l, L):\n if l == L:\n return zzx_eval(g, A[-1])\n else:\n h = [ rec_eval(h, l+1, L) for h in g ]\n\n if l <= L - len(A):\n return h\n else:\n return zzx_eval(h, A[-L+l-1])\n\n if not A:\n return f\n\n L = poly_level(f)\n\n if zzX_zero_p(f):\n return zzX_zero(L - len(A))\n\n e = rec_eval(f, 1, L)\n\n if L == len(A):\n return e\n else:\n return zzX_strip(e)", "def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper", "def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):", "def _(self, node: Call):\n\n args = []\n for n in node.arguments:\n args.append(self.visit(n))\n\n func_args = \" \".join(args)\n\n return f\"( call {node.func.name} {func_args} )\"", "def eval_lisp(exp, a_list, d_list):\n if exp.atom():\n if exp.int():\n return exp\n if exp in T:\n return SExp(\"T\")\n if exp.null():\n return SExp(\"NIL\")\n if in_pairlist(exp, a_list):\n return getval(exp, a_list)\n raise error.LispException(\"unbound variable: {0}\".format(exp))\n if exp.car().atom():\n if not exp.car().non_int_atom:\n msg = \"'{0}' is not a valid function name or \" \\\n \"special form\".format(exp.car())\n raise error.LispException(msg)\n\n #cdar because cdr only would give (quote 5) evaluating to (5),\n #not 5. 
only takes one argument.\n if exp.car() in QUOTE:\n check_args(exp.car(), exp.cdr().length(), 1)\n return exp.cdr().car()\n if exp.car() in COND:\n return evcond(exp.cdr(), a_list, d_list)\n if exp.car() in DEFUN:\n new_func = exp.cdr().car()\n args = exp.cdr().cdr().car()\n body = exp.cdr().cdr().cdr().car()\n check_args(new_func, exp.cdr().length(), 3)\n return defun(new_func, args, body, d_list)\n return apply_lisp(exp.car(),\n evlis(exp.cdr(), a_list, d_list),\n a_list, d_list)\n raise error.LispException(\"eval called with invalid expression\")", "def run_ast(ast):\n # base case, this is not a list, so just return value \n if type(ast) != list:\n return ast\n\n # inductive case, get the function and run function on rest of the params\n function_name = ast[0]\n # TODO: this can be streamlined, with some metaprogramming\n if function_name == \"list\":\n func = list\n elif function_name == \"first\":\n func = first\n\n return func(map(run_ast, ast[1:]))", "def set_function_list(self, L):\n\t\tself.function_list = L", "def simple_map(f, l):\n # Again, my first take is a list comprehension.\n return [ f(item) for item in l ]", "def encode_list(value: list, inner_encoder: typing.Callable) -> bytes:\n return encode_vector_of_t(list(map(inner_encoder, value)))", "def simple_function(arg1, arg2=1):\n return [arg1] * arg2", "def defun(f, args, body, d_list):\n if not f.non_int_atom():\n msg = \"'{0}' is not a valid function name\".format(f)\n raise error.LispException(msg)\n if f in PRIMITIVE_SEXPS:\n raise error.LispException(\"cannot redefine primitive '{0}'\".format(f))\n new_d_list = SExp(SExp(f, SExp(args, body)), copy.copy(d_list))\n d_list.copy(new_d_list)\n return f", "def replace(l):\r\n def action(string, loc, tokens):\r\n return [l]\r\n return action", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def quote_list(the_list):\n return [\"'%s'\" % element for element in the_list]", "def get_real_arg_list(tree, local_arg_list, accessories):\r\n # get and split arguments\r\n real_arg_list=[tree[1]] # value or expression\r\n arg_list = local_arg_list[:-len(accessories)] if accessories else local_arg_list\r\n if tree[1][0]==Comma and len(arg_list)>1: # if multiple arguments\r\n real_arg_list=tree[1][1] # split the real list\r\n real_arg_list=list(real_arg_list) # make sure it is a copy and not the original within tree\r\n \r\n # verify number of arguments\r\n if len(arg_list)!=len(real_arg_list) and len(funcargs)!=1: \r\n print(\"\\n\", Err_nb_args) # *** Error: Wrong number of arguments \r\n print(tree_join(tree), \"-->\", fun+str(arg_list))\r\n print(len(real_arg_list), \"<>\", len(arg_list) )\r\n raise ReferenceError\r\n \r\n return real_arg_list", "def _init_as_list(arg):\n if arg is None:\n return []\n elif isinstance(arg, str):\n return [arg]\n else:\n return arg", "def callArray(func,params,shape):\n call='%s'%func\n call+='np.array([%s.tolist()]*%i)'%(params[0],shape[1])\n for v in params[1:]:\n call+=', np.array([%s.tolist()]*%i).T'%(v,shape[0])\n return call+')'", "def compile_parameter_list(self):\r\n if self.__tokenizer.token_type() != TYPES_DIC[\"SYMBOL\"]:\r\n type = self.__get_type()\r\n self.__advance()\r\n name = self.__tokenizer.identifier()\r\n self.__subroutine_symbols.define(name, type, \"argument\")\r\n self.__advance()\r\n while self.__tokenizer.symbol() != ')':\r\n self.__advance()\r\n type = self.__get_type()\r\n self.__advance()\r\n name = self.__tokenizer.identifier()\r\n self.__subroutine_symbols.define(name, type, \"argument\")\r\n 
self.__advance()", "def decode_list(self, tokens: list) -> str:\r\n return NotImplementedError", "def test_call(self):\n self.assertEqual(['CALL', 'proc1', \"(1, 2)\"],\n grammar._CALL_EXPR.parseString(\"CALL proc1(1, 2);\").asList())", "def svn_client_invoke_list_func(svn_client_list_func_t__obj, void_baton, char_path, svn_dirent_t_dirent, svn_lock_t_lock, char_abs_path, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def get_list_dep() -> Callable:\n args = []\n body = [\" r = {}\"]\n # Apply list ops as annotations\n for list_op in self.list_ops:\n args += [f\"{list_op.name}: Optional[List[str]] = Query(None)\"]\n body += [\n f\" if {list_op.name} is not None:\",\n f' r[\"{list_op.name}\"] = {list_op.name}',\n ]\n code = [f\"def inner({', '.join(args)}) -> dict:\"] + body + [\" return r\"]\n r = {\"Optional\": typing.Optional, \"List\": typing.List, \"Query\": Query}\n exec(\"\\n\".join(code), {}, r)\n return r[\"inner\"]", "def to_list(ls):\r\n if isinstance(ls, (list, tuple)):\r\n return list(ls)\r\n else:\r\n return [ls]", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def test_listify(string, cast, expected):\n assert listify(string, cast) == expected", "def arg_comprehension(*args):\n\n my_list = [args for argument in args]\n print(my_list)", "def apply(L, f):\n\n result = []\n for i in L:\n result.append(f(i))\n\n return result", "def _do_the_math(a_list):\n b_list = []\n for i in a_list:\n i = i.replace('pi', \"np.pi\")\n # ...\n i = str(eval(i)) # pylint: disable-msg=eval-used\n b_list.append(i)\n return b_list", "def convert_to_list(*args):\n converted_list = []\n for arg in args:\n if isinstance(arg, list):\n converted_list.extend(arg)\n else:\n converted_list.append(arg)\n\n return converted_list", "def _coerce_list(self, thing):\n\t\tif isinstance(thing, basestring):\n\t\t\treturn list((thing,))\n\n\t\treturn list(thing)", "def func(\n args_as_list: List[str], capture_output: bool = False, dir_to_execute: str = None\n ) -> List[str]:\n cmd = [binary] + args_as_list\n return self._run(cmd, capture_output, dir_to_execute)", "def list_ref(s_list, i):\n require_type(isa(s_list,List), 'parameters of list-ref must be a list')\n return s_list[i]", "def list_wrap(spec):\n if not isinstance(spec, list):\n spec = [spec]\n return spec", "def listify(val):\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]", "def visit_ListComp(self, node):\n try:\n (generator,) = node.generators\n except ValueError:\n raise NotImplementedError(\"Only single loop comprehensions are allowed\")\n\n names = find_names(generator.target)\n argslist = [ast.arg(arg=name.id, annotation=None) for name in names]\n if len(names) <= 1:\n signature = ast.arguments(\n args=argslist,\n vararg=None,\n kwonlyargs=[],\n kw_defaults=[],\n kwarg=None,\n defaults=[],\n )\n else:\n signature = ast.List(elts=argslist, ctx=ast.Load())\n\n array = generator.iter\n lam_sig = functools.partial(ast.Lambda, args=signature)\n\n filters = generator.ifs\n if filters:\n filt = ast.BoolOp(op=ast.And(), values=filters)\n # array.filter\n method = ast.Attribute(value=array, attr=\"filter\", ctx=ast.Load())\n # array.filter(func)\n array = ast.Call(func=method, args=[lam_sig(body=filt)], keywords=[])\n\n method = ast.Attribute(value=array, attr=\"map\", ctx=ast.Load())\n mapped 
= ast.Call(func=method, args=[lam_sig(body=node.elt)], keywords=[])\n result = self.visit(mapped)\n return result", "def test_int_literals_array_constructor():\n fcode = \"1, 2, 3\"\n ast = Fortran2003.Ac_Spec(fcode)\n assert isinstance(ast, Fortran2003.Ac_Value_List)", "def simplelist(inline):\n return paramfinder.findall(inline)", "def _convert_to_list(self, input_argument):\n if type(input_argument) is not list:\n input_argument = [input_argument]\n return input_argument", "def zzX_value(l, f):\n if type(f) is not list:\n return zzX_const(l, f)\n else:\n if not l:\n return f\n else:\n return [zzX_value(l-1, f)]", "def test_cast_list(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert dim.cast([\"1\", \"2\"]) == [1.0, 2.0]", "def str_list_works(x):\n import ast\n x = ast.literal_eval(x)\n x = [n.strip() for n in x]\n return (x)", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def listify(obj):\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]", "def call_statement(env, node):\n fun = env['f'][node.name]\n func_env = Environment(env).create(env['f'])\n args = fun['args'].interpret(env)\n call_args_interpretuated = node.args.interpret(env)\n args_counter = 0\n for arg in args:\n func_env['v'][arg] = call_args_interpretuated[args_counter].interpret(env)\n args_counter += 1\n fun['body'].interpret(func_env)\n return func_env['r']", "def OMList(self, l):\n # Except for the conversion of operands, this duplicates the default\n # implementation of python's list conversion to openmath in py_openmath\n return om.OMApplication(elem=om.OMSymbol(cdbase=self._cdbase, cd='Python', name='list', ), arguments=l)", "def __call__(self, X, Y=None, eval_gradient=False):\n list_pfunc = self._get_one_param('list_func')\n if(Y is None):\n Y =X\n for f in reversed(list_pfunc):\n X = f(X, Y=Y, eval_gradient=False)\n return X", "def __create_list(self, tokens : List[Union[str,int]]) -> List[List[Union[str,int]]]:\n if tokens:\n return [self.__add_instruction(cp(tokens[:1+syntaxParametersDict.get(tokens[0])]))] + self.__create_list(cp(tokens[1+syntaxParametersDict.get(tokens[0]):]))\n return []", "def list_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=list)", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def expand(self) -> List[TOKEN]:\n return [self.function, *self.args]", "def list_to_perl_string(input_list):\n elems = []\n for v in input_list:\n t = type(v).__name__\n if t == 'str':\n elems.append(\"\\\"%s\\\"\" % escape_perl_string(v))\n elif t == 'int':\n elems.append(\"%d\" % v)\n elif t == 'float':\n elems.append(\"%f\" % v)\n elif t == 'list':\n elems.append(\"%s\" % list_to_perl_string(v))\n elif t == 'dict':\n elems.append(\"%s\" % dict_to_perl_string(v))\n else:\n raise Exception(\"Unsupported type \" + str(t))\n return \"[%s]\" % \", \".join(elems)", "def makelist(inlist, listchar='', stringify=False, escape=False, encoding=None):\n if stringify:\n inlist = list_stringify(inlist)\n listdict = {'[' : '[%s]', '(' : '(%s)', '' : '%s'}\n outline = []\n # this makes '[' the default for empty or single value lists\n if len(inlist) < 2:\n listchar = listchar or '['\n for item in inlist:\n if not isinstance(item, (list, tuple)):\n if escape:\n item = quote_escape(item)\n outline.append(elem_quote(item, encoding=encoding))\n else:\n # recursive for nested lists\n outline.append(makelist(item, listchar or '[', \n stringify, escape, encoding))\n 
return listdict[listchar] % (', '.join(outline))", "def make_list(expr, kind):\n if isinstance(expr, kind):\n return list(expr[:])\n else:\n return [expr]", "def sqllist(lst):\n if isinstance(lst, basestring): \n return lst\n else:\n return ', '.join(lst)", "def ast_for_testlist(testlist):\n if isinstance(testlist, list):\n value = ast.Tuple()\n value.elts = testlist\n value.ctx = Load\n else:\n value = testlist\n return value", "def paranthesis_list(output_name, input_var=_db_name):\n return '(' + delimitedList(input_var).setResultsName(output_name) + ')'", "def callmethod(\n self, method: str, *args: Sequence[Any], **kwargs: Sequence[Any]\n ) -> List[Any]:\n return getattr(self, method)(*args, **kwargs)", "def evalfunc(c, flist, x):\n return sum([c[i] * flist[i](x) for i in range(len(flist))])", "def sequence(f, lst: list) -> list:\n ret = []\n for ele in lst:\n ret.append(f(ele))\n return ret", "def listify(x):\n\n if isinstance(x, list):\n return x\n elif isinstance(x, tuple):\n return list(x)\n else:\n return [x]", "def make_list_uri(_type, args):\n arglist = list_to_uri_piece(args)\n return '/%s/%s' % (_type, arglist)", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def eval_arg(arg_value, arg_name=''):\n if arg_name.lower().endswith('_list') and isinstance(arg_value, str):\n return [eval_arg(cell) for cell in arg_value.split(',')]\n if not isinstance(arg_value, str):\n return arg_value\n if arg_value.lower() in ['true', 'false']:\n return eval(arg_value.capitalize())\n if arg_value.lstrip('-').isdigit():\n return int(arg_value)\n if arg_value.replace('.', '', 1).isdigit():\n return float(arg_value)\n return arg_value", "async def infer_shape_list_map(track, fn, *lsts):\n argrefs = [TransformedReference(track.engine, getelement, xs)\n for xs in lsts]\n return ListShape(await (await fn['shape'])(*argrefs)) # noqa: W606", "def cmd_list(args):" ]
[ "0.6245148", "0.61611956", "0.60648584", "0.6038298", "0.59792304", "0.59553707", "0.59013915", "0.5873387", "0.5854381", "0.5801732", "0.57718545", "0.5754236", "0.5722947", "0.5686056", "0.5682627", "0.56783223", "0.5655683", "0.5624097", "0.5621373", "0.5564674", "0.55535793", "0.55439407", "0.55419135", "0.55281377", "0.55103797", "0.5499892", "0.5498203", "0.5474634", "0.54696244", "0.54537505", "0.543134", "0.54205376", "0.54029816", "0.53966767", "0.539559", "0.53811675", "0.53669053", "0.5365654", "0.53636444", "0.535528", "0.53460276", "0.5332629", "0.5322303", "0.5314706", "0.5302728", "0.5300933", "0.5297077", "0.5295301", "0.52763444", "0.5260139", "0.5259998", "0.5255261", "0.52367413", "0.52324796", "0.5224251", "0.5220775", "0.51795876", "0.5172331", "0.5171827", "0.5162006", "0.516073", "0.51557964", "0.514298", "0.514236", "0.5131145", "0.51274765", "0.5127106", "0.5120476", "0.5109183", "0.51059085", "0.50924295", "0.50772685", "0.5071106", "0.5070363", "0.5066035", "0.5055045", "0.50547934", "0.5052646", "0.5046397", "0.50389075", "0.5036777", "0.5033221", "0.50265425", "0.50131404", "0.5004971", "0.49920624", "0.49909967", "0.4986137", "0.49748704", "0.49715275", "0.49700624", "0.49575865", "0.495159", "0.49510056", "0.49505165", "0.49437988", "0.49414867", "0.49344137", "0.49320355", "0.4923958" ]
0.6453626
0
Convert tuple literal to function call.
def visit_Tuple(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): return to_call(to_attribute(self.operator, '__tuple__'), node.elts) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_func_tuple(f_args):\n return f_args[0](*f_args[1:])", "def tuple(x):\n pass", "def parse_tuple(value):\n match = re.match(r'(\\w+)=(\\w+)\\((.*?)\\)', value)\n assert match, \"could not parse '%s'\" % value\n return match.group(1), eval(match.group(2))(match.group(3))", "def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func", "def from_literal(tup):\n\n def expand(vals):\n return [from_literal(x) for x in vals]\n\n def union(vals):\n if not isinstance(vals, tuple):\n vals = (vals,)\n v = expand(vals)\n return frozenset(v)\n\n if not isinstance(tup, tuple):\n return ('prim', tup)\n elif isinstance(tup[0], str):\n tag, *vals = tup\n if tag == 'prim':\n return tup\n elif tag == 'tuple':\n params = tuple(expand(vals))\n return (tag, params)\n elif tag == 'map':\n k, v = vals\n return (tag, (union(k), union(v)))\n else:\n vals, = vals # pylint: disable=self-assigning-variable\n return (tag, union(vals))\n else:\n return tuple(expand(tup))", "def map_generate_tuple(*args):\n key, func, arg = args[0][0], args[0][1], args[0][2]\n return (key, func(*arg))", "def tuple2func(func1, func2):\n return lambda e: (func1(e), func2(e))", "def dec_tupl(fnc,*data):\n def wrapper(*args, **kwargs):\n return (fnc(*args,**kwargs),) + data\n return wrapper", "def get_call_value(call_node):\n s = get_name_value(call_node.func)\n if isinstance(call_node.func.ctx, ast.Load):\n # convert ast args to literals\n args = [convert_arg(a) for a in call_node.args]\n # suround literal strings with a set of quotes for easy placing into\n # a string\n args = ['\"' + a + '\"' if isinstance(a, str) else a for a in args]\n # join all the args into a set of parens\n s += \"(\" + \",\".join(args) + \")\"\n return s", "def tuple_from_sequence(*args):\n return tuple(args)", "def convert_to_tuple(self, tuple_str):\n return ast.literal_eval(tuple_str)", "def _(self, node: Call):\n\n args = []\n for n in node.arguments:\n args.append(self.visit(n))\n\n func_args = \" \".join(args)\n\n return f\"( call {node.func.name} {func_args} )\"", "def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples", "def process_tuple(self, raw_tuple, sbj, rel, obj):\n pass", "def main():\n sampleTuple = (100, 200, 300)\n print(tupleStrFormat(sampleTuple))", "def test_star_args_with_tuple():\n arg_tuple = ('blue', 'red', 'yellow', 'orange')\n assert arguments.fun_star_params(*arg_tuple) == ('blue', 'red', 'yellow',\n 'orange')", "def call_statement(env, node):\n fun = env['f'][node.name]\n func_env = Environment(env).create(env['f'])\n args = fun['args'].interpret(env)\n call_args_interpretuated = node.args.interpret(env)\n args_counter = 0\n for arg in args:\n func_env['v'][arg] = call_args_interpretuated[args_counter].interpret(env)\n args_counter += 1\n fun['body'].interpret(func_env)\n return func_env['r']", "def tuple_map(x):\n return x * 2", "def _eval_str_tuple(value):\n if not (value.startswith('(') and value.endswith(')')):\n raise ValueError(value)\n\n orig_value = value\n value = value[1:-1]\n\n result = []\n while value:\n m = _strs.match(value)\n if 
m is None:\n raise ValueError(orig_value)\n\n result.append(m.group(1))\n value = value[len(m.group(0)):]\n\n return tuple(result)", "def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. \"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)", "def etuple(*args, **kwargs):\n return ExpressionTuple(args, **kwargs)", "def make_tuple(tuple_like):\n tuple_like = (\n tuple_like\n if isinstance(tuple_like, (list, tuple))\n else (tuple_like, tuple_like)\n )\n return tuple_like", "def generate_from_tuple(t):\n\n data_generator(*t)", "def extract_tuple_function(programs, replacement_marker, validity_function):\n\n flat_programs=[]\n for p in programs:\n flat_programs+=p\n\n # Create a dictionary that will be used to translate lists of tuples to list of integers, and an inverse dictionary to translate in the reverse way.\n d=tuple_int_translator(flat_programs)\n reverse_d={v: k for k, v in d.items()}\n reverse_d[-1]=replacement_marker\n\n int_programs=[[d[t] for t in p] for p in programs]\n\n # Define a new validity function that works on lists of integers rather than lists of tuples, using the provided validity function.\n def int_validity_function(program):\n tuple_program=[reverse_d[i] for i in program]\n return validity_function(tuple_program)\n\n function, new_int_programs=extract_int_function(int_programs, -1, int_validity_function)\n\n tuple_function=[reverse_d[i] for i in function]\n\n new_programs=[[reverse_d[i] for i in p] for p in new_int_programs]\n\n return new_programs, tuple_function", "def strtuple(iterable): \n string = ''\n function = type(strtuple)\n for i in iterable:\n if isinstance(i , function):\n string += i.__name__ + ', '\n else:\n string += str(i) + ', '\n string = string.rstrip(', ')\n string = '(' + string + ')'\n return string", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def _convert_to_tuple(r):\n if not r:\n return r\n else:\n return (r[\"token\"], r[\"value\"], r[\"code\"], r[\"address\"],)", "def parse_tuple(tuple_string):\n return tuple_string.strip().strip(\"\\\"[]\")", "def process_let_binding(binding):\n if isinstance(binding[0], str):\n return tuple(binding)\n elif isinstance(binding[0], list):\n name = binding[0][0]\n params = binding[0][1:]\n body = binding[1]\n return tuple([name, ['lambda', params, body]])\n else:\n raise SnekEvaluationError('let binding pair cannot process type {}'.format(type(binding)))", "def func_subs(t, Func_expr, func, t0):\n assert(isinstance(type(Func_expr), UndefinedFunction))\n pos = Func_expr.args.index(t)\n\n def frozen(*args):\n # tuples are immutable\n L = list(args)\n L.insert(pos, t0)\n new_args = tuple(L)\n return func(*new_args)\n return frozen", "def no_parentheses():\n weird_tuple = 1, 2, 3\n print(weird_tuple) # (1, 2, 3)\n print(type(weird_tuple)) # <type 'tuple'>", "def construct_tuple(self, node):\n return tuple(ImportLoader.construct_sequence(self, node))", "def 
gen_type_tuple_string(self, name, node):\n return \"('{}', {})\".format(name, self.gen_type_string(node))", "def convert_raw_tuple(value_tuple, format_string):\n values = []\n for v, c in zip(value_tuple, format_string):\n if v is None:\n # append None\n values.append(v)\n elif c == u\"s\":\n # string\n values.append(v)\n elif c == u\"S\":\n # string, split using space as delimiter\n values.append([s for s in v.split(u\" \") if len(s) > 0])\n elif c == u\"i\":\n # int\n values.append(int(v))\n elif c == u\"U\":\n # Unicode\n values.append(convert_unicode_field(v))\n elif c == u\"A\":\n # ASCII\n values.append(convert_ascii_field(v))\n #elif c == u\"x\":\n # # ignore\n # pass\n return tuple(values)", "def convert_tuple(self, v, t):\n return tuple(self(v, t)\n for v, t in zip(v, t.elements))", "def add_make_tuple(self, input_names, name=None):\n return self._build_op('make_tuple', input_names, name=name)", "def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])", "def _calc2call(func):\n def _converter(inp, *x):\n if func.n_inputs == 1:\n retvals = func.evaluate(x[0], *inp)\n else:\n retvals = func.evaluate(x[0], x[1], *inp)\n return retvals\n return _converter", "def _tupstr(tuple_):\n return ', '.join(list(map(str, tuple_)))", "def generate_from_tuple(cls, t):\n\n cls.generate(*t)", "def tupleize(num_items=None, conv=float, dtype=tuple):\n def split_values(value):\n \"\"\"Convert comma-separated string *value* to a tuple of numbers.\"\"\"\n try:\n result = dtype([conv(x) for x in value.split(',')])\n except:\n raise argparse.ArgumentTypeError('Expect comma-separated tuple')\n\n if num_items and len(result) != num_items:\n raise argparse.ArgumentTypeError('Expected {} items'.format(num_items))\n\n return result\n\n return split_values", "def get_value_tuple_outer_function(index, tuple_input):\n return (tuple_input[0][index],\n tuple_input[1][index],\n tuple_input[2][index])", "def _polymorph_args_to_tuple( args):\n\n if len(args) == 1 and isinstance(args[0], Iterable):\n args = args[0]\n return list(tuple(args))", "def convert_input_to_tuple(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n data = args[0].api.payload\n try:\n kwargs['tupled_output'] = json.loads(data,\n object_hook=_json_object_hook)\n return fn(*args, **kwargs)\n except Exception:\n data = json.dumps(data)\n kwargs['tupled_output'] = json.loads(data,\n object_hook=_json_object_hook)\n return fn(*args, **kwargs)\n\n return wrapper", "def translate_call_to_sql(self, query, expr, state):\n args = [query.expression_to_sql(arg, state) for arg in expr.args]\n distinct = expr.distinct and 'DISTINCT ' or ''\n # this will generate a possibly new name, which is why we call this here\n # so we can consider that in the function call translation generated below:\n self.load()\n return f'{self.get_name()}({distinct}{\", \".join(args)})'", "def tuple_operation(a: list, b: list, op: str) -> list:\n o = []\n for i in range(0, 3):\n if op == \"xor\":\n o.append(a[i] ^ b[i])\n elif op == \"and\":\n o.append(a[i] & b[i])\n elif op == \"or\":\n o.append(a[i] | b[i])\n else:\n raise RuntimeError('Unknown operation')\n return o[0], o[1], o[2]", "def parameter_tuple_maker(parameter_code, code_list, i):\n\n return (parameter_code, code_list[i])", "def visit_Call(self, node: ast.Call) -> None:\n self._check_unnecessary_literals(node)\n self.generic_visit(node)", "def tupleStrFormat(tupl):\n string = \"this is a tuple (\"\n for element in tupl:\n string += str(element) + \", \"\n string += \")\"\n 
return string", "def str_tuple(item):\n return \"{}:{}\".format(item[0], item[1])", "def _convert_args(self, expr, args, kwargs):\n assert expr is not None\n\n if not kwargs:\n return args\n\n if kwargs and not isinstance(expr, Function):\n raise Exception(\"can only supply keyword parameters for a \"\n \"relay.Function, found {0}\".format(expr))\n\n params = expr.params\n param_names = [p.name_hint for p in params]\n num_of_args = len(args)\n\n cargs = list(args)[:]\n for i, name in enumerate(param_names):\n if i < num_of_args:\n if kwargs.get(name):\n raise Exception(\n \"duplicate argument supplied in \"\n \"both positional args (at position: {0}), \"\n \"and keyword argument (with name: {1})\".format(i, name))\n else:\n cargs.append(kwargs[name])\n\n if len(cargs) != len(params):\n raise Exception(\n \"insufficient arguments, expected \"\n \"{0}, provided {1}\".format(len(cargs), len(params)))\n\n return tuple(cargs)", "def _unwrap_simple_call(self, node: ast.expr) -> ast.expr:\n if isinstance(node, ast.Call) and len(node.args) == 1 and not node.keywords:\n return self._unwrap_simple_call(node.args[0])\n return node", "def FunctionCall(self):\n id = self.currtok[0]\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n params = list()\n\n while self.currtok[1].name in {\"BOOL\", \"INTLIT\", \"IDENT\", \"REAL\"}:\n param = self.Expression()\n if self.currtok[1].name != \"RPAREN\":\n if self.currtok[1].name == \"COMMA\":\n self.currtok = next(self.tg)\n else:\n raise SLUCSyntaxError(\"ERROR: Missing comma on line {0}\".format(str(self.currtok[2] - 1)))\n params.append(param)\n\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n return FuncIDExpr(id, params)\n\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def try_tuple(obj):\n # type: (Union[T, Tuple[T]]) -> Tuple[T]\n if isinstance(obj, tuple):\n return obj\n\n return obj, # NOTE the comma, made into tuple", "def tuple_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=tuple)", "def _call(self, args):\n a = args.split(' ', 1)\n if a:\n getattr(self, a[0])(*a[1:])", "def _Call(self, t):\n # check calls but let attributes check in their own dispatcher\n funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var] # message_input variable is a valid function name as certain message types have arguments on iterator\n if isinstance(t.func, ast.Name):\n if (t.func.id not in funcs):\n self.RaiseWarning(t, \"Function call is not a defined FLAME GPU device function or a supported python built in.\")\n # dispatch even if warning raised\n self.dispatch(t.func)\n elif isinstance(t.func, ast.Lambda):\n self.dispatch(t.func) # not supported\n else:\n # special handler for dispatching member function calls\n # This would otherwise be an attribute\n self.dispatchMemberFunction(t.func, t) \n self.write(\"(\")\n self._CallArguments(t)\n self.write(\")\")", "def convert_to_tuple(v):\n if not isinstance(v, tuple):\n return tuple(v)\n else:\n return v", "def convert_to_user_call(*args):\n return _ida_hexrays.convert_to_user_call(*args)", "def call(swipl, *terms, **kwargs):\n for kwarg in kwargs:\n if kwarg not in [\"module\"]:\n raise KeyError\n\n module = kwargs.get(\"module\", None)\n\n t = terms[0]\n for tx in terms[1:]:\n t = Functor(\",\", swipl, 2)(t, tx)\n\n return 
swipl.PL_call(t.handle, module)", "def from_tuple(cls, coords):\n return cls(*coords)", "def visit_Call(self, node):\n assert hasattr(node, 'args')\n if node.args:\n assert isinstance(node.args[0], gast.Starred)\n # modify args\n if isinstance(node.args[0].value, gast.Name):\n node.args[0].value.id += '_new'\n\n assert hasattr(node, 'keywords')\n if node.keywords:\n assert isinstance(node.keywords[0], gast.keyword)\n self.generic_visit(node)\n return node", "def tuple_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (tuple, collections.abc.Sequence)):\n name = type(var).__name__\n raise TupleError(\n 'Function {} expected tuple, {} got instead.'.format(func, name))", "def to_literal(typ, always_tuple=False):\n\n def expand(params):\n return (to_literal(x) for x in params)\n\n def union(params):\n ret = tuple(sorted(expand(params), key=str))\n if len(ret) == 1 and not always_tuple:\n ret, = ret # pylint: disable=self-assigning-variable\n return ret\n\n tag, params = typ\n if tag == 'prim':\n return params\n elif tag == 'tuple':\n vals = tuple(expand(params))\n return (tag, *vals)\n elif tag == 'map':\n k, v = params\n return (tag, union(k), union(v))\n else:\n return (tag, union(params))", "def format_tuple(data):\n return \",\".join([str(item) for item in data])", "def from_tuple(cls, t):\n return cls(t[0], t[1])", "def give_me_a_tuple():\n my_tuple = ('p','e','r','m','i','t')\n return my_tuple\n pass", "def test_call(self):\n self.assertEqual(['CALL', 'proc1', \"(1, 2)\"],\n grammar._CALL_EXPR.parseString(\"CALL proc1(1, 2);\").asList())", "def _call_op_sugar(self, op_idx, *args):\n if not all(isinstance(a, six.integer_types) for a in args):\n raise TypeError('All args passed to call_op must be integers '\n '(LoomResult ids.) 
Did you forget to call constant?')\n result = self._weaver.CallOp(op_idx, args)\n if not result:\n raise AssertionError('Weaver op call failed: %s' %\n self._weaver.error_string())\n if len(result) == 1:\n return result[0]\n return result", "def tuple_ty(*tuple_types : MIRType) -> 'MIRTupleType':\n return MIRTupleType(list(tuple_types))", "def as_args(function):\n return lambda x: function(*x)", "def __tuple_to_scalar(tuple_value):\n if isinstance(tuple_value, tuple) and len(tuple_value)==1:\n return tuple_value[0]\n else:\n return tuple_value", "def build_tuple(self, t):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n repr_elems = self.concatenate(t, comma)\n return self.build_container(\n repr_elems, self.left_parenthesis, self.right_parenthesis)", "def serialize_tuple(self, obj):\n return '(' + ''.join([self.serialize(i) for i in obj]) + 't'", "def test_star_args_with_tuple_and_dict():\n arg_tuple = ('orange', 'yellow')\n arg_dict = {'visited_color': 'red',\n 'link_color': 'blue'}\n\n assert arguments.fun_star_params(*arg_tuple, **arg_dict) == ('orange',\n 'yellow',\n 'red',\n 'blue')", "def on_apply(self, node):\n if node.inputs[0].is_constant(Primitive):\n fn = node.inputs[0].value\n conv = MAP.get(fn)\n if conv is not None:\n return conv(self, *node.inputs[1:])\n return relay.Call(self.ref(node.inputs[0]),\n [self.ref(i) for i in node.inputs[1:]])", "def call(self, func):\n\t\targs = tuple(self.__dict__.values())\n\t\ttry:\n\t\t\treturn eval(\"func\" + str(args))\n\t\texcept Exception, e:\n\t\t\traise ValueError(\"Given Function is not valid for calling: %s\" % e)", "def create_tuple(self, node, offset):\n raise NotImplementedError", "def single_element_tuple():\n single = (1,)\n print(type(single)) # <type 'tuple'>", "def deg_tuple_to_str(tup):\n if len(tup) == 0:\n return \"()\"\n str = '('\n for x in tup:\n str += \"{0:.2f}, \".format(x)\n str = str[:-2] + ')'\n return str", "def safe_conversion(value):\n try:\n value = ast.literal_eval(value)\n value = list(value) if isinstance(value, tuple) else value\n return value\n except ValueError:\n return value", "def hinted_tuple_hook(obj):\n if '__tuple__' in obj:\n return tuple(obj['items'])\n return obj", "def delimit_tuple(tuple_: tuple, delimiter=\",\"):\n if not type(tuple_) == tuple:\n raise TypeError(\n \"Expected a list or tuple, \" \"but got {}\".format(type(tuple_).__name__)\n )\n return delimiter.join(map(str, tuple_))", "def mapper(fun: Callable[[str], Pin], /) -> None:", "def timetuple(self, *args, **kwargs): # real signature unknown\r\n pass", "def astuple(v: Any) -> Tuple[Any, ...]:\n return to_tuple(v, reuse_instances=False, convert_sets=False)", "def from_string_tuple(string_tuple):\n left = string_tuple[0].split(\" \")\n right = string_tuple[1].split(\" \")\n return Context(left, right)", "def tuple_gen(self, gen):\n for r in gen:\n yield self.convert_v6_to_tuple(r)", "def visit_tuple_type(self, left: TupleType) -> T:", "def convert_arg((arg, attrs, mode, typ, name)):\n iorname = name\n return iorname, (arg, attrs, mode, typ, name)", "def _formatter_func(self, tup):\n formatter_funcs = [level._formatter_func for level in self.levels]\n return tuple(func(val) for func, val in zip(formatter_funcs, tup))", "def parseTupleList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"),\",\"*\")\r\n string = string.replace(\"(\", \"\")\r\n string = string.replace(\")\", \"\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in 
xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = tuple(string[i])\r\n return string", "def squared_call(fn, arg):\n return fn(fn(arg))", "def eval(*args, **kwargs):\n\n pass", "def _builtin_split_call(term, parts, database=None, location=None, **kwdargs):\n functor = '=..'\n # modes:\n # <v> =.. list => list has to be fixed length and non-empty\n # IF its length > 1 then first element should be an atom\n # <n> =.. <list or var>\n #\n mode = check_mode((term, parts), ['vL', 'nv', 'nl'], functor=functor, **kwdargs)\n if mode == 0:\n elements, tail = list_elements(parts)\n if len(elements) == 0:\n raise CallModeError(functor, (term, parts),\n message='non-empty list for arg #2 if arg #1 is a variable',\n location=database.lineno(location))\n elif len(elements) > 1 and not _is_atom(elements[0]):\n raise CallModeError(functor, (term, parts),\n message='atom as first element in list if arg #1 is a variable',\n location=database.lineno(location))\n elif len(elements) == 1:\n # Special case => term == parts[0]\n return [(elements[0], parts)]\n else:\n term_part = elements[0](*elements[1:])\n return [(term_part, parts)]\n else:\n part_list = (term.with_args(),) + term.args\n current = Term('[]')\n for t in reversed(part_list):\n current = Term('.', t, current)\n try:\n local_values = {}\n list_part = unify_value(current, parts, local_values)\n elements, tail = list_elements(list_part)\n term_new = elements[0](*elements[1:])\n term_part = unify_value(term, term_new, local_values)\n return [(term_part, list_part)]\n except UnifyError:\n return []", "def parameter_tuple_parser(parameter_tuple, code_list, relative_base):\n\n if parameter_tuple[0] == 0:\n code_list_lengthener(code_list, parameter_tuple[1])\n return code_list[parameter_tuple[1]]\n elif parameter_tuple[0] == 1:\n return parameter_tuple[1]\n elif parameter_tuple[0] == 2:\n return code_list[parameter_tuple[1] + relative_base]\n else:\n print('And I oop.... parameter_tuple_parser')", "def format_task_call(task: \"Task\", args: Tuple, kwargs: dict) -> str:\n all_args = OrderedDict()\n sig = task.signature\n\n for i, param in enumerate(sig.parameters.values()):\n if i < len(args):\n # Positional argument.\n all_args[param.name] = args[i]\n\n else:\n # Keyword argument.\n all_args[param.name] = kwargs.get(param.name, param.default)\n\n args_text = \", \".join(format_arg(arg_name, value) for arg_name, value in all_args.items())\n return \"{task}({args})\".format(\n task=task.fullname,\n args=args_text,\n )", "def _silent_ntuple_ ( s , *args ) :\n from Ostap.Utils import mute_py \n with mute_py() : \n tup = _TU.__nTuple__(s , *args )\n logger.info ( 'Booked n-tuple %s' % tup )\n return tup", "def CALL(name, *args):\r\n funcname = 'is_' + name\r\n func = getattr(libueye, funcname)\r\n new_args = []\r\n for a in args: \r\n if isinstance (a, unicode):\r\n print name, 'argument',a, 'is unicode'\r\n new_args.append (str (a))\r\n else:\r\n new_args.append (a)\r\n return func(*new_args)" ]
[ "0.7458055", "0.63818896", "0.6287098", "0.62121403", "0.6174", "0.61714363", "0.61672425", "0.61028904", "0.6084416", "0.606182", "0.6054684", "0.6038045", "0.5894287", "0.58655924", "0.5855375", "0.5848195", "0.58276147", "0.58071357", "0.57829064", "0.57389504", "0.56750906", "0.56040084", "0.5529752", "0.54964846", "0.5477384", "0.5474606", "0.5474606", "0.5468391", "0.5445261", "0.54175663", "0.539307", "0.53821254", "0.5351724", "0.53053063", "0.53036153", "0.52973956", "0.5289431", "0.5266344", "0.52567106", "0.5253317", "0.5249351", "0.524699", "0.52264977", "0.521837", "0.5215921", "0.5207512", "0.52064306", "0.5201095", "0.51991326", "0.51981246", "0.519726", "0.51953995", "0.51917845", "0.5185729", "0.51848865", "0.51764154", "0.5170337", "0.5152665", "0.5146992", "0.513935", "0.51287025", "0.5124383", "0.51232386", "0.51203215", "0.5104217", "0.50987816", "0.509838", "0.5088413", "0.50853115", "0.50666577", "0.50594455", "0.5035541", "0.50340307", "0.5028025", "0.50061285", "0.50010526", "0.50008816", "0.49978247", "0.4993584", "0.49900034", "0.49807", "0.49697194", "0.49637806", "0.4959727", "0.49564913", "0.49409622", "0.4933359", "0.49225694", "0.4919469", "0.4912508", "0.49105248", "0.4903978", "0.49010682", "0.49004954", "0.4894694", "0.48857275", "0.48818696", "0.48753983", "0.48679504", "0.48622185" ]
0.65799105
1
Convert set literal to function call.
def visit_Set(self, node): self.generic_visit(node) return to_call(to_attribute(self.operator, '__set__'), node.elts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set(self, arg: SeField[Any]) -> str:\n if is_bare_set(arg.type):\n return f\"list({arg.varname}) if convert_sets else {arg.varname}\"\n else:\n earg = arg[0]\n earg.name = \"v\"\n return (\n f\"[{self.render(earg)} for v in {arg.varname}] \"\n f\"if convert_sets else set({self.render(earg)} for v in {arg.varname})\"\n )", "def _apply_to_sets(self, func, operation, keys, *args):\n keys = self._list_or_args(keys, args)\n if not keys:\n raise TypeError(\"{} takes at least two arguments\".format(operation.lower()))\n left = self._get_set(keys[0], operation) or set()\n for key in keys[1:]:\n right = self._get_set(key, operation) or set()\n left = func(left, right)\n return left", "def set(x):\n pass", "def parse_set_cmd(self, line):\n _, set_type, var_name, _, set_name = line.split()\n if set_type not in SET_TYPES:\n self.print_error(f\"Currently can't set system '{set_type}'.\"\n + \" Please choose from:\\n\\t* \"\n + \"\\n\\t* \".join(SET_TYPES)\n )\n else:\n set_fnc = f\"parse_set_{set_type}\"\n if set_fnc not in dir(self):\n self.print_error(\"BUG IN CODE! Tell Matt that he needs to \"\n + f\"implement the function '{set_fnc}'\")\n\n getattr(self, set_fnc)(line)", "def imageset(*args):\n from .fancysets import ImageSet\n from .setexpr import set_function\n\n if len(args) < 2:\n raise ValueError('imageset expects at least 2 args, got: %s' % len(args))\n\n if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:\n f = Lambda(args[0], args[1])\n set_list = args[2:]\n else:\n f = args[0]\n set_list = args[1:]\n\n if isinstance(f, Lambda):\n pass\n elif callable(f):\n nargs = getattr(f, 'nargs', {})\n if nargs:\n if len(nargs) != 1:\n raise NotImplementedError(filldedent('''\n This function can take more than 1 arg\n but the potentially complicated set input\n has not been analyzed at this point to\n know its dimensions. 
TODO\n '''))\n N = nargs.args[0]\n if N == 1:\n s = 'x'\n else:\n s = [Symbol('x%i' % i) for i in range(1, N + 1)]\n else:\n s = inspect.signature(f).parameters\n\n dexpr = _sympify(f(*[Dummy() for i in s]))\n var = tuple(uniquely_named_symbol(\n Symbol(i), dexpr) for i in s)\n f = Lambda(var, f(*var))\n else:\n raise TypeError(filldedent('''\n expecting lambda, Lambda, or FunctionClass,\n not \\'%s\\'.''' % func_name(f)))\n\n if any(not isinstance(s, Set) for s in set_list):\n name = [func_name(s) for s in set_list]\n raise ValueError(\n 'arguments after mapping should be sets, not %s' % name)\n\n if len(set_list) == 1:\n set = set_list[0]\n try:\n # TypeError if arg count != set dimensions\n r = set_function(f, set)\n if r is None:\n raise TypeError\n if not r:\n return r\n except TypeError:\n r = ImageSet(f, set)\n if isinstance(r, ImageSet):\n f, set = r.args\n\n if f.variables[0] == f.expr:\n return set\n\n if isinstance(set, ImageSet):\n # XXX: Maybe this should just be:\n # f2 = set.lambda\n # fun = Lambda(f2.signature, f(*f2.expr))\n # return imageset(fun, *set.base_sets)\n if len(set.lamda.variables) == 1 and len(f.variables) == 1:\n x = set.lamda.variables[0]\n y = f.variables[0]\n return imageset(\n Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)\n\n if r is not None:\n return r\n\n return ImageSet(f, *set_list)", "def set_or_callable(value) -> frozenset[str] | Callable:\n if value is None:\n return frozenset()\n if callable(value):\n return value\n if isinstance(value, (frozenset, set, list)):\n return frozenset(value)\n return frozenset([str(value)])", "def set():\n pass", "def test_calls_in_set_with_name_and_value_arguments_only(self):\n some_calls = set([Call(\"one\", 1), Call(\"one\", 1.1), Call(\"two\", 2, 2.2), Call(\"two\", 2, \"two\")])\n assert_that(Call(\"one\", 1) in some_calls, equal_to(True))\n assert_that(Call(\"one\", 2) in some_calls, equal_to(False))", "def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func", "def set():", "def _call_set(vecObj, val):\n res = vecObj.set(val)\n return res", "def parseSet(cmds):\n if len(cmds) != 0:\n first = str.strip(cmds[0])\n if first[0] == 'w':\n pass\n elif first[0] == 'r':\n pass\n else:\n parseExpr(first)\n parseSet(cmds[1:])", "def __call__(value):", "def callable_time_set(self, callable_time_set):\n \n self._callable_time_set = callable_time_set", "def _function_set(self, data_length=self.data_length, number_of_lines=self.number_of_lines, character_font=self.character_font):\n function_set_mask = 32\n data = funtion_set_mask | (data_length << 4) | (number_of_lines << 3) | (character_font << 2)\n\n self.instruction(data)", "def setter(self, func):\n self.fset = func\n self.set_setattr_mode(SetAttr.CallObject_ObjectValue, func)\n return self", "def _set_function(self):\n value = 0x20 | self.power | self.addressing | self.instr\n self.command([value])", "def SetOperator(self, A):\n return _hypre.HypreLOBPCG_SetOperator(self, A)", "def generate_setLike_operations_properties(interface, set_like):\n setlike_ops = []\n \"\"\"\n Need to create a typedef for a function callback e.g.,\n a setlike will need a callback that has the proper args in FontFaceSet that is\n three arguments, etc.\n\n typedef void FontFaceSetForEachCallback(\n FontFace fontFace, FontFace fontFaceAgain, FontFaceSet set);\n\n void forEach(FontFaceSetForEachCallback callback, [Object thisArg]);\n \"\"\"\n callback_name = '%sForEachCallback' % interface.id\n set_op = 
generate_operation(interface.id, 'void', 'forEach',\n [[IDLType(None, callback_name), 'callback'],\n [IDLType(None, 'any'), 'thisArg', True]])\n setlike_ops.append(set_op)\n\n set_op = generate_operation(\n interface.id, 'boolean', 'has',\n [[IDLType(None, set_like.value_type.base_type), 'arg']])\n setlike_ops.append(set_op)\n\n if not set_like.is_read_only:\n # Issue #45676: `add` can return null on Firefox, so this should be\n # typed nullable.\n add_result_nullable = True\n set_op = generate_operation(\n interface.id, interface.id, 'add',\n [[IDLType(None, set_like.value_type.base_type), 'arg']],\n add_result_nullable)\n setlike_ops.append(set_op)\n set_op = generate_operation(\n interface.id, 'boolean', 'delete',\n [[IDLType(None, set_like.value_type.base_type), 'arg']])\n setlike_ops.append(set_op)\n set_op = generate_operation(interface.id, 'void', 'clear', [])\n setlike_ops.append(set_op)\n\n return setlike_ops", "def _setter_decor(self, fset):\n\n def fdec(obj):\n def _decor(fun):\n fset(obj, fun)\n return fun\n\n return _decor\n\n return self._init_inherit(fset=fset, fdec=fdec)", "def get(self, opset: OpsetVersion) -> Optional[Set[Callable]]:\n return self._functions.get(opset)", "def SetOperator(self, op):\n return _hypre.HypreGMRES_SetOperator(self, op)", "def do_set(self, arg):\n try:\n statement, param_name, val = arg.parsed.raw.split(None, 2)\n val = val.strip()\n param_name = param_name.strip().lower()\n if param_name not in self.settable:\n hits = [p for p in self.settable if p.startswith(param_name)]\n if len(hits) == 1:\n param_name = hits[0]\n else:\n return self.do_show(param_name)\n current_val = getattr(self, param_name)\n if (val[0] == val[-1]) and val[0] in (\"'\", '\"'):\n val = val[1:-1]\n else:\n val = cast(current_val, val)\n setattr(self, param_name, val)\n self.poutput('%s - was: %s\\nnow: %s\\n' % (param_name, current_val, val))\n if current_val != val:\n try:\n onchange_hook = getattr(self, '_onchange_%s' % param_name)\n onchange_hook(old=current_val, new=val)\n except AttributeError:\n pass\n except (ValueError, AttributeError):\n self.do_show(arg)", "def SetOperator(self, op):\n return _hypre.HypreADS_SetOperator(self, op)", "def __set_operation_function(self):\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval", "def __init__(self, setfunc, column, role, convertfunc):\n super(SetDataArgs, self).__init__()\n self.setfunc = setfunc\n self.column = column\n self.role = role\n self.convertfunc = convertfunc", "def do_set(self, arg):\n try:\n statement, paramName, val = arg.parsed.raw.split(None, 2)\n val = val.strip()\n paramName = paramName.strip().lower()\n if paramName not in self.settable:\n hits = [p for p in self.settable if p.startswith(paramName)]\n if len(hits) == 1:\n paramName = hits[0]\n else:\n return self.do_show(paramName)\n currentVal = getattr(self, paramName)\n if (val[0] == val[-1]) and val[0] in (\"'\", '\"'):\n val = val[1:-1]\n else:\n val = cast(currentVal, val)\n setattr(self, paramName, val)\n self.stdout.write('%s - was: %s\\nnow: %s\\n' % (paramName, currentVal, val))\n if currentVal != val:\n try:\n onchange_hook = getattr(self, '_onchange_%s' % paramName)\n onchange_hook(old=currentVal, new=val)\n except AttributeError:\n pass\n except (ValueError, AttributeError, NotSettableError):\n self.do_show(arg)", "def _functionset(self):\n\t\n\t\t#Instruciton is set based on __init__ () arguments\n\t\tinstruction = 0b00100000\n\t\tinstruction = instruction | 
self.bit_mode\n\t\tinstruction = instruction | self.line_num\n\t\tinstruction = instruction | self.char_height\n\t\t\n\t\tself._send(instruction, RS_INSTRUCTION)", "def _build_set_command(self, cmd, param, val):\n try:\n str_val = self._param_dict.format(param, val)\n set_cmd = '%s=%s' % (param, str_val)\n set_cmd = set_cmd + SBE37_NEWLINE\n \n except KeyError:\n raise InstrumentParameterException('Unknown driver parameter %s' % param)\n \n return set_cmd", "def SetOperator(self, A):\n return _hypre.HypreAME_SetOperator(self, A)", "def _handler_command_set(self, *args, **kwargs):\n next_state = None\n result = None\n\n # Retrieve required parameter.\n # Raise if no parameter provided, or not a dict.\n try:\n params = args[0]\n \n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n if not isinstance(params, dict):\n raise InstrumentParameterException('Set parameters not a dict.')\n \n # For each key, val in the dict, issue set command to device.\n # Raise if the command not understood.\n else:\n \n for (key, val) in params.iteritems():\n result = self._do_cmd_resp('set', key, val, **kwargs)\n self._update_params()\n \n return (next_state, result)", "def SetOperator(self, op):\n return _hypre.HypreAMS_SetOperator(self, op)", "def get_from_set(set_):\n for e in set_: return e", "def __call__(fun_name):", "def SetOperator(self, op):\n return _hypre.HypreILU_SetOperator(self, op)", "def call_to_key(method, arguments):\n def freeze(arg):\n if isinstance(arg, dict):\n items = dict((key, freeze(value)) for key, value\n in arg.iteritems())\n return frozenset(items.iteritems())\n elif isinstance(arg, list):\n return tuple([freeze(item) for item in arg])\n else:\n return arg\n\n new_args = []\n for arg in arguments:\n new_args.append(freeze(arg))\n return (method, tuple(new_args))", "def SetOperator(self, op):\n return _hypre.HypreSolver_SetOperator(self, op)", "def register(\n self, name: str, opset: OpsetVersion, func: Callable, custom: bool = True\n ) -> None:\n if \"::\" not in name:\n raise ValueError(\n f\"The name must be in the form of 'domain::op', not '{name}'\"\n )\n symbolic_functions = self._registry.setdefault(\n name, _SymbolicFunctionGroup(name)\n )\n if custom:\n symbolic_functions.add_custom(func, opset)\n else:\n symbolic_functions.add(func, opset)", "def call(self, func):\n\t\targs = tuple(self.__dict__.values())\n\t\ttry:\n\t\t\treturn eval(\"func\" + str(args))\n\t\texcept Exception, e:\n\t\t\traise ValueError(\"Given Function is not valid for calling: %s\" % e)", "def set(self):\n return AttributeFunctor(self, lambda x, y: y)", "def Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeMapTool_Set(*args)", "def get_set(self, which_set):\n return (getattr(self, 'x_' + which_set),\n getattr(self, 'y_' + which_set))", "def load_set(self, set_name):\n if set_name == 'test':\n return self.xtest", "def setCall(self, *metaArgs, **kw):\n if metaArgs:\n equiv = True\n if self.lastMetaArgs is None:\n equiv = False\n elif len(metaArgs) != len(self.lastMetaArgs):\n equiv = False\n else:\n for k, arg in enumerate(metaArgs):\n try:\n thisEquiv = (arg == self.lastMetaArgs[k])\n except:\n thisEquiv = False\n if not thisEquiv:\n equiv = False\n break\n if equiv and not hasattr(self, 'pastInfo'):\n # We called this already with the same metaArgs and\n # without any pastInfo to reckon with, so there's\n # nothing to do.\n return self\n # Starting over with a new f\n callDict = {'f': metaArgs[0], 'fs': self._funcText(metaArgs[0])}\n args = metaArgs[1] if 
len(metaArgs) > 1 else []\n if not isinstance(args, (tuple, list)):\n args = [args]\n callDict['args'] = args\n callDict['kw'] = metaArgs[2] if len(metaArgs) > 2 else {}\n callDict['instance'] = None\n if self.whichThread:\n callDict['thread'] = threading.current_thread().name\n self.callDict = callDict\n elif hasattr(self, 'callDict'):\n # Adding to an existing f\n for name in ('args', 'kw', 'instance'):\n if name in kw:\n self.callDict[name] = kw[name]\n else:\n raise ValueError(\n \"You must supply at least a new function/string \"+\\\n \"or keywords adding args, kw to a previously set one\")\n if hasattr(self, 'currentID'):\n del self.currentID\n # Runs the property getter\n self.ID\n if metaArgs:\n # Save metaArgs to ignore repeated calls with the same metaArgs\n self.lastMetaArgs = metaArgs\n return self", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def _from_yaml_to_func(method, params):\r\n prm = dict()\r\n if params is not None:\r\n for key, val in params.items():\r\n prm[key] = eval(str(val))\r\n return eval(method)(**prm)", "def handle_set(self, agent) -> Tuple[Optional[str], Any]:\n ref_obj_d = {\"filters\": self.action_dict[\"filters\"]}\n ref_objs = self.subinterpret[\"reference_objects\"](\n self, self.speaker_name, ref_obj_d, extra_tags=[\"_physical_object\"]\n )\n if len(ref_objs) == 0:\n raise ErrorWithResponse(\"I don't know what you're referring to\")\n\n triples_d = self.action_dict[\"upsert\"][\"memory_data\"].get(\"triples\")\n if len(triples_d) == 1 and triples_d[0][\"pred_text\"] == \"has_name\":\n # the set has a name; check to see if one with that name exists,\n # if so add to it, else create one with that name\n name = triples_d[0][\"obj_text\"]\n set_memids, _ = self.memory.basic_search(\n \"SELECT MEMORY FROM Set WHERE (has_name={} OR name={})\".format(name, name)\n )\n if not set_memids:\n # make a new set, and name it\n set_memid = SetNode.create(self.memory)\n self.memory.add_triple(subj=set_memid, pred_text=\"has_name\", obj_text=name)\n else:\n # FIXME, which one\n set_memid = set_memids[0]\n else:\n # an anonymous set, assuming its new, and defined to hold the triple(s)\n set_memid = SetNode.create(self.memory)\n for t in triples_d:\n self.memory.add_triple(\n subj=set_memid, pred_text=t[\"pred_text\"], obj_text=t[\"obj_text\"]\n )\n for r in ref_objs:\n self.memory.add_triple(subj=r.memid, pred_text=\"member_of\", obj=set_memid)\n\n # FIXME point to the objects put in the set, otherwise explain this better\n self.memory.dialogue_stack_append_new(Say, \"OK made those objects into a set \")\n return None, None", "def __call__(self, function: FuncStrArg):\n self._add_attr(function)\n return function", "def SetOperator(self, op):\n return _hypre.HyprePCG_SetOperator(self, op)", "def test_with_set_tuple(self):\n self.assertListEqual([5, 4, 3, 2, 1], switch_reverser((1, 2, 3, 4, 5)))\n self.assertListEqual([5, 4, 3, 2, 1], switch_reverser({1, 2, 3, 4, 5}))", "def map(s,dic):\n state=s.getstate()\n if not state in dic:raise Exception(\"the current state \"+str(state)+\" is not available to map to using the dictionary \"+str(dic))\n val=dic[state]\n if 
callable(val):\n return val()\n states=s.getstates()\n if val in states:\n return s.setstate(val)\n raise Exception(\"I dont know how to use this \"+str(state)+\" since it maps to a type of \"+str(type(val))+\" namely \"+str(val))", "def _get_set(self, key, operation, create=False, decode=False):\n return self._get_by_type(key, operation, create, b'set', set(), decode=decode)", "def XCAFDoc_ShapeMapTool_Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeMapTool_Set(*args)", "def test_RestrictingNodeTransformer__visit_In_Set():\n assert restricted_eval('2 in {1, 1, 2, 3}') is True", "def preprocess_literal(op: str, literal: Any) -> Expression:\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. \"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)", "def set_instr(self, instr, set=True):\n assert instr in [self.INSTR_BASIC, self.INSTR_EXT], \"Instr must be INSTR_BASIC or INSTR_EXT.\"\n self.instr = instr\n if set:\n self._set_function()", "def SetOperator(self, op):\n return _hypre.HypreEuclid_SetOperator(self, op)", "def _from_yaml_to_func(method, params):\n prm = dict()\n if params is not None:\n for key, val in params.iteritems():\n prm[key] = eval(str(val))\n return eval(method)(**prm)", "def process_set(a_set: set) -> str:\n\n return ', '.join(str(s) for s in a_set) if a_set else Presenter.DEFAULT", "def apply_operator_set(model, operator_set):\n field_part = []\n for operator in operator_set:\n field_part.append(apply_const_shift_operator(model, operator))\n field_part = torch.cat(field_part)\n return field_part", "def set(**args):\n return Context(args)", "def setter(self, func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n self.fset = func\n return self", "def _transform_value_callable(\n self, value: dict | list | str, c: Callable, kwargs=None\n ) -> str | None | list[str] | None:\n # find signature of method and call with correct args\n kwargs = kwargs or {}\n try:\n sig = signature(c, follow_wrapped=True)\n if 'ti_dict' in sig.parameters:\n kwargs.update({'ti_dict': self.ti_dict})\n if 'transform' in sig.parameters:\n kwargs.update({'transform': self})\n except ValueError: # signature doesn't work for many built-in methods/functions\n pass\n\n # pass value to transform callable/method, which should always return a string\n return c(value, **kwargs)", "def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s", "def _set_action_attribute(self, action, index, setdataarg):\n if isinstance(setdataarg.column, int):\n column = setdataarg.column\n else:\n column = getattr(self, setdataarg.column)\n data = self.get_data(index, setdataarg.role, column)\n if data is None:\n return\n setattrmethod = getattr(action, setdataarg.setfunc)\n if setdataarg.convertfunc:\n data = setdataarg.convertfunc(data)\n setattrmethod(data)", "def _set_func(self, func):\n if callable(func):\n self._func = func\n else:\n raise TypeError(\"'func should be callable'\")", "def expand_callable(self, call_expr):\n call_expr.func = ast.Attribute(value=call_expr.func, 
attr='__call__')", "def set_func(self, func):\n li_dim = self.space.dim\n from . import func_tools as ft\n if (self.operator in _cst.list_FIELD_OPERATORS) or (len(self.func_arguments) == 0) :\n from .utils import function\n self.func = function(func, space=self.space)\n else:\n raise(\"Not used anymore. Dead code\")", "def Set(*args):\n return _XCAFDoc.XCAFDoc_LayerTool_Set(*args)", "def get_cmdset_callback(cmdset):\n string = self.format_output(obj, cmdset)\n self.msg(string.strip())", "def add(self, func: Callable, opset: OpsetVersion) -> None:\n # FIXME(titaiwang): Check if the \"function\" is ducplicated.\n self._functions.set_base(opset, func)", "def mk_assign(var_map, s, assigns):\n assign_args = []\n for k, v in assigns.items():\n k2 = convert_term(var_map, s, k)\n assert k2.fun == s, \"mk_assign: key is not an identifer.\"\n assign_args.append(k2.arg)\n assign_args.append(convert_term(var_map, s, v))\n\n return function.mk_fun_upd(s, *assign_args)", "def to_PyGetSetDef(name, entries):\r\n getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]\r\n getSetDef = ('static PyGetSetDef %s_getseters[] = {\\n ' % name +\r\n ',\\n '.join(getSetDefEntries) + ',\\n ')\r\n getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel\\n};'\r\n return getSetDef", "def do_set(self, setting: str):\n if self._real_module is None:\n print(\"Set command depends on using a module. See 'use' for help.\")\n return\n\n splitted_input = setting.split()\n if len(splitted_input) < 2:\n print(\"Invalid argument to split\")\n else:\n key = splitted_input[0]\n value = \" \".join(splitted_input[1:])\n self._real_module.set(key, value)", "def set_func(self, function):\n self.get(COMMAND_UIC, 'SetFunc', [('function', function)])", "def map(self, function):\n pass", "def visit_ioa_shorthand(self, typ: ast.Call):\n pass", "def __call__(a, b):", "def get_call_value(call_node):\n s = get_name_value(call_node.func)\n if isinstance(call_node.func.ctx, ast.Load):\n # convert ast args to literals\n args = [convert_arg(a) for a in call_node.args]\n # suround literal strings with a set of quotes for easy placing into\n # a string\n args = ['\"' + a + '\"' if isinstance(a, str) else a for a in args]\n # join all the args into a set of parens\n s += \"(\" + \",\".join(args) + \")\"\n return s", "def call(self, x, mask=None):", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def f_set(self, *args, **kwargs):\n if args and self.v_name is None:\n raise AttributeError(\n \"Cannot set positional value because I do not have a name!\"\n )\n for idx, arg in enumerate(args):\n valstr = self.f_translate_key(idx)\n self.f_set_single(valstr, arg)\n\n for key, arg in kwargs.items():\n self.f_set_single(key, arg)", "def Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_Set(*args)", "def SetLogicalFunction(*args, **kwargs):\n return _gdi_.PseudoDC_SetLogicalFunction(*args, **kwargs)", "def sets(set_id, set_name, series):\n if (set_id):\n format_set_info(find_set(set_id))\n else:\n params = build_up_set_params(set_name, series)\n print(params)\n param_list=''\n for k, v in params.items():\n param_list += (f'{k}:\"{v}\" ')\n param_list = param_list.strip()\n click.echo(param_list) \n sets = Set.where(q=param_list)\n for pset in sets:\n format_set_info(pset)", "def any_function(x):\n return x ** x # here we can hardcode any function", "def SetOperator(self, op):\n return 
_hypre.HypreFGMRES_SetOperator(self, op)", "def callableize(f_or_d):\n return f_or_d.get if isinstance(f_or_d,dict) else f_or_d", "def map(self, callable, iterable):\n iterable = executor.get_actual_value(iterable)\n return super(Executor, self).map(callable, iterable)", "def __call__(self, x, **kwargs):\n del kwargs\n for f in self._functions:\n x = f(x)\n return x", "def eval(*args, **kwargs):\n\n pass", "def power(set_):\n ensure_countable(set_)\n\n result = chain.from_iterable(combinations(set_, r)\n for r in xrange(len(set_) + 1))\n return _harmonize_subset_types(set_, result)", "def __call__(self, expression):\n self.set_expression(expression)", "def on_apply(self, node):\n if node.inputs[0].is_constant(Primitive):\n fn = node.inputs[0].value\n conv = MAP.get(fn)\n if conv is not None:\n return conv(self, *node.inputs[1:])\n return relay.Call(self.ref(node.inputs[0]),\n [self.ref(i) for i in node.inputs[1:]])", "def chainable(fn: Callable):\n # @validate_arguments\n @functools.wraps(fn)\n def setter_wrapper(self, *args: Any, **kwargs: Any) -> Any:\n fn(self, *args, **kwargs)\n return self\n\n return setter_wrapper", "def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)", "def _eval_rewrite_as_Union(self, *sets, **kwargs):\n\n dj_union = S.EmptySet\n index = 0\n for set_i in sets:\n if isinstance(set_i, Set):\n cross = ProductSet(set_i, FiniteSet(index))\n dj_union = Union(dj_union, cross)\n index = index + 1\n return dj_union", "def SetLogicalFunction(*args, **kwargs):\n return _gdi_.DC_SetLogicalFunction(*args, **kwargs)", "def XCAFDoc_ShapeTool_Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_Set(*args)", "def inner(self, value):\n if value is None:\n return func(self, None)\n elif isinstance(value, datetime.datetime):\n return func(self, value)\n else:\n value = TS_SETTER_TRANSFORM_RE.sub(TS_SETTER_TRANSFORM_REPL, value)\n return func(self, iso8601.parse_date(value))" ]
[ "0.6105887", "0.60869235", "0.6042422", "0.594492", "0.58734345", "0.5806317", "0.5636314", "0.5612864", "0.5483707", "0.546873", "0.54369307", "0.5421345", "0.5389135", "0.5195353", "0.5110589", "0.5105132", "0.5087218", "0.5083517", "0.5051965", "0.50517464", "0.5040997", "0.50280774", "0.50219023", "0.50212556", "0.501696", "0.501289", "0.50051874", "0.4995381", "0.49790686", "0.4975022", "0.4965564", "0.49629685", "0.49582162", "0.49559686", "0.49526095", "0.49429658", "0.4940435", "0.4935887", "0.49313614", "0.49122488", "0.48892093", "0.4887059", "0.48684692", "0.4859447", "0.48535824", "0.48375008", "0.48250327", "0.48249188", "0.48111767", "0.48009652", "0.47919145", "0.47891635", "0.4781911", "0.47819012", "0.47804242", "0.4770329", "0.47685626", "0.47592193", "0.47524053", "0.474376", "0.47402364", "0.47298345", "0.4726411", "0.47256646", "0.47251046", "0.47228298", "0.47181898", "0.47157058", "0.47062507", "0.47036391", "0.4702459", "0.4697428", "0.46955684", "0.4671219", "0.46692884", "0.46528256", "0.46524733", "0.46521932", "0.4648241", "0.46410817", "0.4639665", "0.46348923", "0.46297902", "0.46283334", "0.4622352", "0.4617921", "0.46171045", "0.46094155", "0.4606169", "0.4601384", "0.46008962", "0.45985997", "0.45884097", "0.45697135", "0.4569255", "0.45679644", "0.4564243", "0.4563731", "0.45618513", "0.45545667" ]
0.6132109
0
Convert dictionary literal to function call, if possible.
def visit_Dict(self, node):\n    self.generic_visit(node)\n    if all(isinstance(key, ast.Str) for key in node.keys):\n        keywords = [\n            ast.keyword(arg=key.s, value=value)\n            for key, value in zip(node.keys, node.values)\n        ]\n        return to_call(to_name('dict'), keywords=keywords)\n    return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callableize(f_or_d):\n return f_or_d.get if isinstance(f_or_d,dict) else f_or_d", "def callFuncBasedOnDict(func, argdict, **kwargs):\n if argdict is None:\n argdict = {}\n seldict = selectArgsFromDict(func, argdict)\n if kwargs is not None:\n seldict.update(kwargs)\n return func(**seldict)", "def _from_yaml_to_func(method, params):\r\n prm = dict()\r\n if params is not None:\r\n for key, val in params.items():\r\n prm[key] = eval(str(val))\r\n return eval(method)(**prm)", "def _from_yaml_to_func(method, params):\n prm = dict()\n if params is not None:\n for key, val in params.iteritems():\n prm[key] = eval(str(val))\n return eval(method)(**prm)", "def parse_argdict(extras):\n return [(key, value() if callable(value) else value) for key, value in extras.items()]", "def call(self, func):\n\t\targs = tuple(self.__dict__.values())\n\t\ttry:\n\t\t\treturn eval(\"func\" + str(args))\n\t\texcept Exception, e:\n\t\t\traise ValueError(\"Given Function is not valid for calling: %s\" % e)", "def map_named(function: Callable[[str, Any], Any],\n val: Any,\n key: Optional[str] = \"\") -> Any:\n if isinstance(val, Mapping):\n return type(val)(\n **{k: map_named(function, v, key + \"/\" + k) for k, v in val.items()})\n elif isinstance(val, tuple) or isinstance(val, list):\n return type(val)(\n *\n [map_named(function, v, key + \"/\" + str(i)) for i, v in enumerate(val)])\n # check if it's a flax dataclass\n elif hasattr(val, \"__dataclass_fields__\"):\n classname = repr(val).split(\"(\")[0]\n return type(val)(**{\n k: map_named(function, v, f\"{key}/{classname}.{k}\")\n for k, v in val.__dataclass_fields__.items()\n })\n else:\n return function(key, val)", "def map(s,dic):\n state=s.getstate()\n if not state in dic:raise Exception(\"the current state \"+str(state)+\" is not available to map to using the dictionary \"+str(dic))\n val=dic[state]\n if callable(val):\n return val()\n states=s.getstates()\n if val in states:\n return s.setstate(val)\n raise Exception(\"I dont know how to use this \"+str(state)+\" since it maps to a type of \"+str(type(val))+\" namely \"+str(val))", "def expand_call(kargs):\n func = kargs['func']\n del kargs['func']\n out = func(**kargs)\n return out", "def test_function_statement():\n r = convert_code(\"{foo arg1=bar arg2=3}\")\n assert r == \"{{ {'arg1': bar, 'arg2': 3}|foo }}\"", "def test_function_statement_at_operator():\n r = convert_code(\"{@foo arg1=bar arg2=3}\")\n assert r == \"{{ {'arg1': bar, 'arg2': 3}|foo }}\"", "def exec_init(self, key, value, **_):\n return value", "def translator(dict):\n f = lambda match: dict.get(match.group(), match.group())\n return lambda expression: _word_pattern.sub(f,expression)", "def map_values(fun, a_dict):\n return dict((k, fun(v)) for (k, v) in a_dict.items())", "def map_values(function, dictionary):\n return {k: function(dictionary[k]) for k in dictionary}", "def dict(dict: Dict[str, Pin], /) -> None:", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)", "def selectArgsFromDict(func, argdict):\n return dict([(i, argdict[i]) for i in getArgs(func) if i in argdict])", "def _load_from_callable(name, kwds, converters={}):\n # See if we actually have the named object.\n dotted_name = kwds.pop(name, None)\n if dotted_name is None:\n return None\n obj = resolveDotted(dotted_name)\n # Extract any arguments for the callable.\n obj_kwds = 
{}\n prefix = name + \"_\"\n for key in kwds.keys():\n if key.startswith(prefix):\n obj_kwds[key[len(prefix):]] = kwds.pop(key)\n # To any type conversion on the arguments.\n for key, value in obj_kwds.iteritems():\n converter = converters.get(key)\n if converter is not None:\n obj_kwds[key] = converter(value)\n # Call it if callable.\n if callable(obj):\n obj = obj(**obj_kwds)\n elif obj_kwds:\n raise ValueError(\"arguments provided for non-callable %r\" % (name,))\n return obj", "def __call__(self, word, *args):\n return self.value[word](*args)", "def eval(*args, **kwargs):\n\n pass", "def transition_fnc_from_mapping(transition_mapping):\n def transition_fnc(fsm, symbol):\n try:\n return transition_mapping[symbol]\n except KeyError:\n raise KeyError(\"unable to find transition for symbol: %r\" % (symbol,))\n # make transition mapping available through the function\n transition_fnc.mapping = transition_mapping\n return transition_fnc", "def _transform_value_callable(\n self, value: dict | list | str, c: Callable, kwargs=None\n ) -> str | None | list[str] | None:\n # find signature of method and call with correct args\n kwargs = kwargs or {}\n try:\n sig = signature(c, follow_wrapped=True)\n if 'ti_dict' in sig.parameters:\n kwargs.update({'ti_dict': self.ti_dict})\n if 'transform' in sig.parameters:\n kwargs.update({'transform': self})\n except ValueError: # signature doesn't work for many built-in methods/functions\n pass\n\n # pass value to transform callable/method, which should always return a string\n return c(value, **kwargs)", "def flexdictargs(func: Callable[[dict], RT]) -> Callable[[Iterable, Any], RT]:\n\n @wraps(func)\n def f(self, *args, **kwargs):\n if args and isinstance(args[0], MutableMapping):\n d = args[0]\n elif kwargs:\n d = kwargs\n else:\n raise TypeError(\"invalid input arguments\")\n return func(self, normalize(d))\n\n return f", "def lambda_eval(v):\n return v() if hasattr(v, '__call__') else v", "def get_function_from_text(f):\n return lambda x: eval_expr(f, {'x': x}, numpy_dict)", "def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func", "def _eval_params(trial, params: Dict[str, Any]) -> Dict[str, Any]:\n prepared = dict()\n for arg, value in params.items():\n if isinstance(value, dict):\n # Extract method.\n name = list(value.keys())[0]\n # Add prefix.\n method = \"suggest_\" + name\n # Get method kwargs.\n kwargs = value[name]\n # Add name arg.\n kwargs.update({\"name\": arg})\n # Evaluate method.\n value = getattr(trial, method)(**kwargs)\n prepared.update({arg: value})\n return prepared", "def dict_operate(dict, key, value, operation=None):\n if key in dict and operation is not None:\n dict[key] = operation(dict[key], value)\n else:\n dict[key] = value", "def type_cast(func,data_entry,*args):\n assert isinstance(data_entry,str)\n assert callable(func)\n try:\n out=func(data_entry,*args)\n except:\n out=None\n return out", "def json_to_call(name: str) -> Optional[CallDef]:\n if name not in json:\n logger.warning(f\"No field '{name}' in the row generator JSON\")\n return None\n\n call: Dict[int, str] = {}\n for key, value in json[name].items():\n try:\n index = int(key)\n except ValueError as e:\n raise_error(name, f\"Call index '{key}' is not a valid integer\", e)\n call[index] = value\n return CallDef(call)", "def valuecall(key, atom_dict):\n if key not in atom_dict:\n return 0\n else:\n return atom_dict[key]", "def cast_arguments(cast_dict):\n\n def decorator(func):\n def 
wrapper(request):\n request_params = get_dict_from_request(request)\n request_params = request_params.copy()\n for param in cast_dict:\n if param not in request_params:\n continue\n try:\n request_params[param] = cast_dict[param](\n request_params[param])\n except (ValueError, TypeError) as e:\n return APIInvalidArgumentResponse(error_msg=str(e))\n setattr(request, request.method, request_params)\n return func(request)\n\n return wrapper\n\n return decorator", "def dict2argstr(d: Dict[str, Any]) -> str:\n return \",\".join(\"{!s}={!r}\".format(key, val) for (key, val) in d.items())", "def _lookup_wrapper(d):\n def _inner(key):\n return d[key]\n return _inner", "def __getattr__(self, key):\n return self._func_for_key(key)", "def _call_function(self, svcname, fcallstr):\n try:\n argv = json.loads(fcallstr)\n except Exception as e:\n raise SearpcError('bad call str: ' + str(e))\n\n service = self.services[svcname]\n\n fname = argv[0]\n fn = service.func_table.get(fname, None)\n if fn is None:\n raise SearpcError('No such funtion %s' % fname)\n\n ret = fn(*argv[1:])\n return ret", "def mk_assign(var_map, s, assigns):\n assign_args = []\n for k, v in assigns.items():\n k2 = convert_term(var_map, s, k)\n assert k2.fun == s, \"mk_assign: key is not an identifer.\"\n assign_args.append(k2.arg)\n assign_args.append(convert_term(var_map, s, v))\n\n return function.mk_fun_upd(s, *assign_args)", "def loads_json(function):\n def f(*args, **kwargs):\n return json.loads(function(*args, **kwargs))\n return f", "def recursive_compile(sf_dict):\n retval = {}\n for key, val in sf_dict.items():\n if isinstance(val, dict):\n retval[key] = recursive_compile(val)\n else:\n retval[key] = dense_evaluated_lookup(*val)\n return retval", "def mutate_dict_in_place(func, mapping):\n for key, value in mapping.items():\n if isinstance(value, dict):\n mutate_dict_in_place(func, value)\n else:\n mapping[key] = func(value)", "def _token_callable(token: TOKEN, local_dict: DICT, global_dict: DICT, nextToken=None):\n func = local_dict.get(token[1])\n if not func:\n func = global_dict.get(token[1])\n return callable(func) and not isinstance(func, Symbol)", "def json_in(fn):\n @wraps(fn)\n def new(arg):\n # convert the args in JSON to a python object\n arg = json.loads(arg)\n return fn(arg)\n return new", "def constant_transform(input_dict, constant):\n return {\n \"data\": constant,\n }", "def transform_call(call):\n return {\n 'type': 'call',\n 'chain': [str(fn.name) for fn in call.names()],\n 'arguments': [str(arg) for arg in call.arguments()],\n 'body': transform_block(call.body())\n }", "def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()", "def test_function_statement2():\n r = convert_code(\n \"{foo arg1=bar[1] arg2=foo.bar.foo arg3=foo.bar[3] arg4=foo.bar.awesome[3] }\")\n assert r == \"{{ {'arg1': bar[1], 'arg2': foo.bar.foo, 'arg3': foo.bar[3], 'arg4': foo.bar.awesome[3]}|foo }}\"", "def act_on_dict(output_names=None, input_names=None, mode='add'):\n def wrapper(func):\n assert mode in ACTING_MODES, f'mode has to be one of {ACTING_MODES}'\n # use names of return variables of func if keys to save returned values is not specified\n if output_names is None:\n provides = extract_return(func)\n else:\n provides = output_names\n\n # use argument names in case keys to get input values is not specified\n if input_names is None:\n args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations = inspect.getfullargspec(func)\n requires = (args if 
defaults is None else args[:len(args) - len(defaults)]) + \\\n (kwonlyargs if kwonlydefaults is None else kwonlyargs[:len(kwonlyargs) - len(kwonlydefaults)])\n uses = args + kwonlyargs\n else:\n args = input_names\n varkw = None\n kwonlyargs = []\n\n requires = args\n uses = args\n\n # define function to act on dictionary\n def inner(dictionary):\n # check that all required arguments are present\n for arg in inner.requires:\n assert arg in dictionary, \\\n f\"key '{arg}' whose value is required by function '{func.__name__}' is missing\"\n\n # apply function\n if input_names is not None:\n returns = func(*(dictionary[arg] for arg in args))\n elif varkw is not None:\n returns = func(**dictionary)\n else:\n returns = func(\n **{arg: dictionary[arg] for arg in args if arg in dictionary},\n **{kwonlyarg: dictionary[kwonlyarg] for kwonlyarg in kwonlyargs if kwonlyarg in dictionary})\n\n # add to input or construct new dict based on mode\n if mode == 'add':\n result = dictionary\n else:\n result = {}\n for name, value in zip(provides, returns):\n result[name] = value\n\n return result\n\n # add attributes to function specifying which keys are required, used, provided\n inner.requires = requires\n inner.uses = uses\n inner.provides = provides\n\n return inner\n\n if callable(output_names):\n func = output_names\n output_names = None\n return wrapper(func)\n else:\n return wrapper", "def dict_option(s):\n return _convert(s, (dict,))", "def __convert_key(expression):\n if type(expression) is str and len(expression) > 2 and expression[1] == '!':\n expression = eval(expression[2:-1])\n return expression", "def computed_values(d):\r\n result = {}\r\n for k, v in six.iteritems(d):\r\n if callable(v):\r\n v = v()\r\n if isinstance(v, dict):\r\n v = computed_values(v)\r\n result[k] = v\r\n return result", "def eval(*args, **kwargs)->Any:\n pass", "def test_fn_call_with_dict():\n l = [1, 2, 3, 4, 5]\n ds = [defaultdict(int), defaultdict(int), defaultdict(int)]\n for d in ds:\n for fn in [s7.div, s7.mul, s7.add, \"abcd\", 1234]:\n try:\n f = s7.count_fn_called_with_dict(dict_=d, fn=fn)\n for i in range(0, random.randint(2, 10)):\n f(*l)\n assert fn in d.keys() and d[fn] == (i + 1)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__", "def to_dict(funs):\n def to_dict_funs(an_object):\n return dict((k, f(an_object)) for (k, f) in funs.items())\n return to_dict_funs", "def dict(self, arg: SeField[Any]) -> str:\n if is_bare_dict(arg.type):\n return arg.varname\n else:\n karg = arg[0]\n karg.name = \"k\"\n varg = arg[1]\n varg.name = \"v\"\n return f\"{{{self.render(karg)}: {self.render(varg)} for k, v in {arg.varname}.items()}}\"", "def __call__(self, spec):\n if isinstance(spec, dict):\n return self.dict(spec)\n if isinstance(spec, tuple):\n return self.tuple(spec)\n if isinstance(spec, list):\n return self.list(spec)\n if isinstance(spec, Either):\n return self.either(spec)\n if isinstance(spec, Options):\n return self.option(spec)\n return {\n str: self.str,\n float: self.float,\n int: self.int,\n bool: self.bool,\n numpy.ndarray: self.ndarray,\n hoomd.variant.Variant: self.variant,\n None: self.none\n }[spec]()", "def multi_mapping(func_name, arg_value_pairs, module_name = \"__main__\"):\n func, arg_names = get_function_args(module_name = module_name, function_name = func_name)\n \n return list(map(lambda arg_value_pair: call_func_dynamically(function_name = func_name, \n argument_names = arg_names, \n arg_value_pair = arg_value_pair) ,\n arg_value_pairs))", "def 
_literal_params(params):\n if isinstance(params, dict):\n return params\n try:\n params = json.loads(params) if params else {}\n except Exception as e:\n abort(400, f\"{e}\\Error loading JSON params, of type {type(params)}\")\n if not isinstance(params, dict):\n abort(400, \"Parsed params are not a valid map (dict)\")\n return params", "def with_calculated(funs):\n def with_calculated_funs(a_dict):\n return updated_with(a_dict, to_dict(funs)(a_dict))\n return with_calculated_funs", "def test_str_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(\n lambda: inner_test(param='{\"json\": \"Not allowed.\"}'), 3113\n )", "def call_func_dynamically(function_name, argument_names, arg_value_pair, module_name = \"__main__\"):\n # mapping between arg name and arg value\n arg = list(map(lambda arg_name, arg_value: str_join([arg_name, arg_value], \"=\"), argument_names, arg_value_pair))\n \n # make function call expresion\n func_call = function_name + \"(\" + str_join(arg, \",\") + \")\"\n \n # result\n result = eval(func_call, {function_name : getattr(sys.modules[module_name], function_name)})\n \n return result", "def test_star_args_with_dict():\n arg_dict = {'visited_color': 'orange',\n 'link_color': 'yellow',\n 'back_color': 'red',\n 'fore_color': 'blue'}\n assert arguments.fun_star_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')", "def _get_command_lookup(self, command_dict):", "def __call__(self, *args, **kwargs):\n for key, obj in self._dict.items():\n key[0](obj, *args, **kwargs)", "def input_dictionary_to_parameter(input_dict: Optional[Dict[str, Any]]) -> str:\n if not input_dict:\n return ''\n out = json.dumps(json.dumps(input_dict))\n return out[1:-1] # remove the outside quotes, e.g., \"foo\" -> foo", "def get_key(dictionary: dict, *args) -> Union[str, bool, dict]:\n data = reduce(lambda c, k: c.get(k, {}), args, dictionary)\n if data == {}:\n return \"\"\n return data", "def convert_term(var_map, s, t):\n def convert(t):\n if t.head in var_map:\n if len(t.args) == 0:\n return s(Ident(to_binary(var_map[t.head])))\n elif len(t.args) == 1:\n return s(Para(Ident(to_binary(var_map[t.head])), t.arg))\n else:\n raise NotImplementedError\n elif t.is_equals():\n return Term.mk_equals(convert(t.arg1), convert(t.arg))\n elif logic.is_neg(t):\n return logic.neg(convert(t.arg))\n elif logic.is_conj(t):\n return logic.conj(convert(t.arg1), convert(t.arg))\n elif logic.is_disj(t):\n return logic.disj(convert(t.arg1), convert(t.arg))\n elif t.get_type() == boolT:\n return BoolV(t)\n elif t.get_type() == natT:\n return NatV(t)\n else:\n raise NotImplementedError\n\n return convert(t)", "def enterParams(o, params):\n r = {}\n for p in params:\n if isinstance(p, tuple):\n p, f = p\n else:\n f = str\n if hasattr(o, p):\n r[p] = f(getattr(o, p))\n return r", "def test_RestrictingNodeTransformer__visit_In_Dict():\n assert restricted_eval('2 in {1: 1, 2: 2, 3: 3}') is True", "def get_dict(key):\r\n name = f\"{key}_dict\"\r\n return eval(name)", "def test_dict_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})\n inner_test(param={'foo': 1, 'bar': ['bat', 2]})", "def dict_to_code(mapping):\n lines = (\"{} = {}\".format(key, repr(value))\n for key, value in mapping.items())\n return '\\n'.join(lines)", "def load_state_dict(\n self,\n state_dict: 
Mapping[str, Any],\n *args,\n **kwargs,\n ) -> NamedTuple:\n return super().load_state_dict(state_dict, *args)", "def substitute_keys_in_functions(functions, new_keys):\n for _, func in functions.items():\n func['ret_type'] = new_keys[func['ret_type']]\n substitute_params_keys(func['params'], new_keys)", "def evaluate(self, edict):\n pass", "def test_dict_to_int(self):\n @converters.wrap\n def inner_test(param: int):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(\n lambda: inner_test(param={'foo': 123}), 3111\n )", "def __call__(fun_name):", "def eval_function(function_string):\n for key,value in globals().items():\n if key == function_string and type(value) == types.FunctionType:\n value()\n return\n \n log.warn(\"Unrecognized option: \"+function_string.rsplit(\"_\",1)[0])", "def test_dict_keys_substr_passthrough(self):\n assert (\n orjson.dumps(\n {SubStr(\"aaa\"): True},\n option=orjson.OPT_NON_STR_KEYS | orjson.OPT_PASSTHROUGH_SUBCLASS,\n )\n == b'{\"aaa\":true}'\n )", "def test_takes_dict_or_callable(self):\n scope1 = Scope({ 'where': 'foo' })\n self.assertEqual(scope1.finder_options, { 'where': 'foo' })\n\n call = lambda(cls): cls.where('foo')\n scope2 = Scope(call)\n self.assertEqual(scope2.callable, call)", "def __call__(self, params):\n flat_state_dict = flatten_dict(to_state_dict(params))\n flat_rules_dict = {k: self['/'.join(k)] for k in flat_state_dict.keys()}\n return from_state_dict(params, unflatten_dict(flat_rules_dict))", "def jsonish_dict_to_rpc(dictionary, rpc_message_type):\n return protojson.decode_message(rpc_message_type, json.dumps(dictionary))", "def build_pfunc(cls, representation):\n if ut.is_str(representation):\n try:\n func = eval(representation)\n except:\n bf = 'cls.build_pfunc('\n af = ')'\n st = ut.parse_enclose_with_counter(representation , before = bf, after = af)\n func = eval(st)\n \n elif ut.is_dico(representation):\n name_func = representation['name_func']\n func = eval(name_func)(**representation)\n \n else:\n raise SystemError(\"build_custom_func can build a function from an \"\n \"object of tye {0}\".format(cls.__class__))\n \n return func", "def eval_expr(code, local_dict: DICT, global_dict: DICT):\n expr = eval(\n code, global_dict, local_dict) # take local objects in preference\n return expr", "def _do_dots(self, value, *dots):\n for dot in dots:\n try:\n value = getattr(value, dot)\n except AttributeError:\n try:\n value = value[dot]\n except (TypeError, KeyError) as exc:\n raise TempliteValueError(\n f\"Couldn't evaluate {value!r}.{dot}\"\n ) from exc\n if callable(value):\n value = value()\n return value", "def pull_key(key_fun):\n def pull_key_fun(objs):\n return dict((key_fun(value), value) for value in objs)\n return pull_key_fun", "def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)", "def returner_base(self, key, dict):\n try:\n value = dict[key]\n except KeyError:\n value = dict[key.lower()]\n return value", "def _dict_to_args(self, arg_dict):\n if arg_dict:\n yield \"--{}=data:application/json;charset=utf-8,{}\".format(\n self._CONFIG_FLAG.name,\n urllib.parse.quote(json_encode(arg_dict, pretty=False), encoding=\"utf-8\")\n )", "def __call__(self, key):\n\n def wrapper(func):\n self._registry[key] = func\n\n return wrapper", "def replace_in_string(s, args_dict):\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = 
s.replace(key, value)\n return s", "def f(map, key):\n def decorator(function):\n map[key] = function\n return function\n return decorator", "def render(data_dict, *args, **kwargs):", "def __init__(self, func):\n self.dictionary = {}\n self.func = func", "def expr(runtime_addr, s):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert memorymanager.is_data_loaded_at_binary_addr(binary_addr)\n\n if isinstance(s, dict):\n # Dictionary supplied.\n # Look up value in binary, and use that as key in dictionary\n val = get_u8_binary(binary_addr)\n classification.add_expression(binary_addr, s[val])\n else:\n classification.add_expression(binary_addr, s)", "def call_graph(graph, key, inputs):\n\n node = graph[\"nodes\"][key]\n acceptable = node[\"required\"] | set(node[\"optional\"])\n req = util.select_keys(lambda k, _: k in acceptable, inputs)\n args = util.merge(node[\"optional\"], req)\n\n return node[\"fn\"](**args)", "def Dict(**args):\n return args", "def acfunct(arg):\n try:\n functions = [dynet.rectify, dynet.tanh]\n functions = { function.__name__ : function for function in functions}\n functions[\"None\"] = None\n return functions[str(arg)]\n except:\n raise argparse.ArgumentTypeError(\"String {} does not match required format\".format(arg,))", "def json_decoder_hook(dct, str_decoders=STRING_DECODERS,\n converters=MappingProxyType(dict())) -> dict:\n\n for k, v in dct.items():\n if k in converters:\n parse_func = converters[k]\n dct[k] = parse_func(v)\n\n elif isinstance(v, str):\n for decode_func in str_decoders:\n v = decode_func(v)\n\n if not isinstance(v, str):\n break\n\n dct[k] = v\n elif isinstance(v, collections.Mapping):\n dct[k] = json_decoder_hook(v, str_decoders, converters)\n\n return dct", "def from_dict(cls, dikt) -> 'TransportCall':\n return util.deserialize_model(dikt, cls)", "def from_dict(source_dict: Dict[str, Any]) -> 'Statement':\n # value needs to be passed as a positional argument. It corresponds to the args field.\n try:\n value = source_dict[Statement._args_field]\n except KeyError as ex:\n raise KeyError(f\"Statement dictionary is missing {ex} field\")\n\n # Pass the rest at kwargs (minus args)\n kwargs = source_dict.copy()\n del kwargs[Statement._args_field]\n\n return Statement(value, **kwargs)" ]
[ "0.64434266", "0.63088524", "0.61875296", "0.6126158", "0.5683456", "0.5670447", "0.56030446", "0.5584195", "0.55224985", "0.5522239", "0.5520842", "0.54833007", "0.54591936", "0.53802735", "0.53729284", "0.5354577", "0.529072", "0.52826643", "0.524588", "0.5245523", "0.522427", "0.52211386", "0.5217707", "0.5192009", "0.5190313", "0.51771444", "0.5151063", "0.51153225", "0.50939167", "0.50826526", "0.50706154", "0.50339735", "0.5033085", "0.5019968", "0.50179565", "0.5017816", "0.4994486", "0.49871325", "0.4985475", "0.49831823", "0.49634904", "0.4953407", "0.49446112", "0.4939136", "0.49375352", "0.49148008", "0.49125767", "0.49079597", "0.49000272", "0.4892214", "0.4891759", "0.4888464", "0.48760214", "0.48702955", "0.4870221", "0.48557088", "0.48506254", "0.48457778", "0.48455405", "0.48334944", "0.4827736", "0.48160017", "0.48090664", "0.4791665", "0.47757447", "0.47744825", "0.4773463", "0.47720805", "0.47470665", "0.47404817", "0.4733772", "0.4732628", "0.47198808", "0.47160777", "0.4713273", "0.4704927", "0.47032997", "0.4701333", "0.46980575", "0.4696336", "0.46933067", "0.4684199", "0.4680393", "0.4659396", "0.46579668", "0.4654632", "0.46536908", "0.46528935", "0.4642042", "0.46187362", "0.46168277", "0.4616754", "0.46162662", "0.4609662", "0.46093133", "0.4605708", "0.460224", "0.4596539", "0.4595634", "0.4592485", "0.45739618" ]
0.0
-1
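A minimal, self-contained sketch of the rewrite shown in the visit_Dict document above, assuming only the standard library: the record's to_call/to_name helpers are not included, so the ast.Call node is built directly; ast.Constant stands in for the deprecated ast.Str check; the DictLiteralToCall class name and the sample source string are illustrative, and ast.unparse requires Python 3.9+.

import ast

class DictLiteralToCall(ast.NodeTransformer):
    # Rewrite {'a': 1} into dict(a=1) when every key is a plain string literal.
    def visit_Dict(self, node):
        self.generic_visit(node)
        if all(isinstance(k, ast.Constant) and isinstance(k.value, str) for k in node.keys):
            keywords = [
                ast.keyword(arg=k.value, value=v)
                for k, v in zip(node.keys, node.values)
            ]
            call = ast.Call(func=ast.Name(id="dict", ctx=ast.Load()), args=[], keywords=keywords)
            return ast.copy_location(call, node)
        return node  # non-string keys (e.g. {**other}) leave the literal untouched

tree = ast.parse("cfg = {'host': 'localhost', 'port': 8080}")
tree = ast.fix_missing_locations(DictLiteralToCall().visit(tree))
print(ast.unparse(tree))  # cfg = dict(host='localhost', port=8080)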
User inputs the number of and names of all players
def addPlayers(): # stackoverflow.com/questions/12169258/should-i-use-entrys-get-or-its-textvariables-for-tkinter-in-python print("\nInitial # of players (Line #26) = " + str(self.number_of_players)) # Collect user input from the entry widget & turn it into an int while (self.number_of_players < 2): try: user_input_number_of_players = int(entry_player_number.get()) print("Inside try block, user_input = " + str(user_input_number_of_players)) if(user_input_number_of_players < 2): tkinter.messagebox.showerror('Non-Integer Input', 'User MUST enter a player # > 1.', icon='error') tkinter.messagebox.quit() tkinter.messagebox.destroy() user_input_number_of_players = int(entry_player_number.get()) else: self.number_of_players = user_input_number_of_players except ValueError: tkinter.messagebox.showerror('Non-Integer Input', 'User MUST enter a player # greater than 1.', icon='error') tkinter.messagebox.quit() tkinter.messagebox.destroy() # Add a label myLabel1b = tkinter.Label(self.root, text="Please Enter Player Names: ", width=25) myLabel1b.config(font="Courier 14 bold") myLabel1b.grid(row=2, column=1) # GET PLAYER NAMES FROM USER....USE A SCROLLING CANVAS FRAME #Make a scrollable frame appear # Scroll appears, but doesn't function # Code for scrollable frame came from: myframe = tkinter.Frame(root, relief=tkinter.GROOVE, width=100, height=100) myframe.grid(row=3, column=3, columnspan=2, pady=30, padx=30) myframe.config(width=5) # https://stackoverflow.com/questions/16188420/tkinter-scrollbar-for-frame self.tree = ttk.Treeview(myframe, selectmode="extended") scbVDirSel = ttk.Scrollbar(myframe, orient=tkinter.VERTICAL, command=self.tree.yview) self.tree.configure(yscrollcommand=scbVDirSel.set) # self.tree["columns"] = (self.columnListOutput) self.tree.column("#0", width=40) self.tree.heading("#0", text='SrNo', anchor='w') self.tree.grid(row=2, column=0, sticky=tkinter.NSEW, in_=myframe, columnspan=10, rowspan=10) scbVDirSel.grid(row=2, column=10, rowspan=10, sticky=tkinter.NS, in_=myframe) myframe.rowconfigure(0, weight=1) myframe.columnconfigure(0, weight=1) # put entry boxes for player names inside the scrollable frame for x in range(self.number_of_players): print(x+1) # Add a label myLabel1b = tkinter.Label(myframe, text="Player #" + str(x+1)+ ": ") myLabel1b.config(font="Courier 14 bold") myLabel1b.grid(row=4+x, column=3) # Fix this textVariable parameter - unecessary? # https://stackoverflow.com/questions/32640219/creating-stringvar-variables-in-a-loop-for-tkinter-entry-widgets # user_input_player_names = tkinter.StringVar() name_entries = [] for i in range(self.number_of_players): entry_player_name = tkinter.Entry(myframe, width=10, borderwidth=2) # entry_player_name.set(str(x+1)) entry_player_name.grid(row=4+x, column=4) name_entries.append(entry_player_name) # specify a default value inside the entry box # entry_player_number.insert(0,int("2")) # Add a button for adding players names into the game addPlayerNamesButton = tkinter.ttk.Button(self.root, text="Enter Names", command=get_player_names_from_user) addPlayerNamesButton.grid(row=self.number_of_players+2, column=4) # Make old label, entry, and button dissapear myLabel1.grid_forget() entry_player_number.grid_forget() addPlayerButton.grid_forget() print("# of players after button click = " + str(self.number_of_players)) # Set class instance value to this input from the user return self.number_of_players
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_players(self):\n for i in range(self.number_of_players):\n self.players_names.append(pyip.inputStr(\n prompt=f'\\nEnter name of player {i + 1}:\\n'))", "def set_name(self):\n player1 = input('Enter a name for player 1: ')\n self._players.append(player1)\n player2 = input('Enter a name for player 2: ')\n self._players.append(player2)\n print()\n return self._players", "def establish_players(n_players):\n usernames_out = [input('Please input a username for player ' +str(i)) for i in range(n_players)]\n return {'username':usernames_out}", "def get_players(n, playerspace):\n ps = []\n for i in range(n):\n name = \"\"\n while name == \"\":\n name = input(\"What's the name of player @ index {} (can't be empty): \".format(i))\n p = Player(name, i)\n p.playerspace = playerspace()\n ps.append(p)\n return ps", "def getPlayerName(i):\n while True:\n str_to_ask = \"Input name for Player \" + str(i) + \": \"\n name = input(str_to_ask).strip()\n if name != '': \n return name", "def initialize_players():\n while True:\n nb_of_players = input(\"\\nEntrez le nombre de joueurs : \")\n if not nb_of_players.isdigit():\n print(\"You have to enter a number!\")\n else:\n nb_of_players = int(nb_of_players)\n if nb_of_players < 2:\n print(\"You have to enter at least two!\")\n else:\n break\n nb_of_players = int(nb_of_players)\n list_of_players = [] #This list is going to be returned\n names_secure = [] #stores player's names in lower mode for security\n for index in range(1, nb_of_players+1):\n while True:\n player_name = input(\"Entrer le nom du joueur {} \".format(index))\n if (player_name.lower() == 'end' or player_name.lower() in names_secure):\n print(\"Incorrect Name\")\n else:\n names_secure.append(player_name.lower())\n new_player = Player(player_name)\n list_of_players.append(new_player)\n break\n return list_of_players", "def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)", "def set_players(self, player_min: int, player_max: int):\n name: str = ' '\n print('Please give between %i and %i names for your players'\n % (player_min, player_max))\n while (name != '') and (len(self.players) < player_max):\n name = input('Players {}: '.format(len(self.players)+1))\n if name != '':\n self.players.append(Player(name))\n elif len(self.players) < player_min:\n name = ' '\n\n print()\n print('{} players registered.'.format(len(self.players)))\n print()", "def get_names_users(self):\n user_1 = self.view.entry_player_1.get()\n user_2 = self.view.entry_player_2.get()\n if len(user_1) == 0 or len(user_2) == 0:\n\n tk.messagebox.showwarning(\"Warning\", \"Please enter players name\")\n self.logger.warning(\"Please enter players name\")\n return False\n self.update_players_name(user_1, user_2)\n return True", "def get_player_name():\n\n player = list(input(\"\\nEnter the name of the Football player: \").split(\" \"))\n player_name = \" \".join([i.capitalize() for i in player])\n return player_name", "def get_player_name(self):\n if self.name_entered is False:\n self.name = self.input_name(\"Please enter your name:\")\n self.name_entered = True\n self.score_file()", "def player_name(player: Character) -> None:\r\n global censored_words\r\n while player.name == \"\":\r\n playerName = str(input(\"please enter player one's name: \"))\r\n for i in censored_words:\r\n if playerName == i:\r\n print(\"please choose another name\")\r\n playerName = \" \"\r\n elif playerName == playerTwo.name or playerName == playerOne.name:\r\n print(\"please choose another name\")\r\n playerName = \" \"\r\n else:\r\n player.name = playerName", "def collect_players_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded", "def ask_info_player(self) -> str:\n\n print(\"Enter first name : \")\n while True:\n first_name = input()\n if check.check_input_string_special(first_name) is True:\n if check.check_input_string_len(first_name) is True:\n if check.check_input_string_integer(first_name) is True:\n break\n\n print(\"Enter last name : \")\n while True:\n last_name = input()\n if check.check_input_string_special(last_name) is True:\n if check.check_input_string_len(last_name) is True:\n if 
check.check_input_string_integer(last_name) is True:\n break\n\n print(\"Enter date of birth with this format YEAR-MONTH-DAY : \")\n birthday = check.check_date_input()\n\n print(\n \"Enter a number for choose the gender : \\n\"\n \"1 - Man \\n\"\n \"2 - Women\"\n )\n genre = check.request_selection_with_number(\"Man\", \"Women\", \"none\")\n\n print(\"\\n The player {} {}, {}, birth on {} has been added to the database !\".format(\n first_name,\n last_name,\n genre,\n birthday))\n\n return first_name, last_name, birthday, genre", "def name_input():\n print('NAME CAN BE 10 CHARACTERS MAX. LETTERS, NUMBERS & UNDERSCORES ONLY')\n while True:\n player_name = input('PLEASE ENTER A TEAM NAME:\\n')\n if validate_team_name(player_name):\n break\n print(f'\\nTHE NAME YOU CHOSE IS: {player_name}\\n')\n print(PHASE)\n time.sleep(1)\n print(' ')\n return player_name", "def get_players_name(self, display, suffix_text=\"\"):\n player = \"\"\n text = self.FONT.render(f\"{suffix_text} Enter your name: {player}\", True, c.WHITE)\n display.blit(text, (self.SCORE_BOARD_X * 2, self.SCORE_BOARD_Y / 3))\n pygame.display.update()\n\n char_keys = [pygame.K_a, pygame.K_b, pygame.K_c, pygame.K_d, pygame.K_e, pygame.K_f, pygame.K_g, pygame.K_h,\n pygame.K_i, pygame.K_j, pygame.K_k, pygame.K_l, pygame.K_m, pygame.K_n, pygame.K_o, pygame.K_p,\n pygame.K_q, pygame.K_r, pygame.K_s, pygame.K_t, pygame.K_u, pygame.K_v, pygame.K_w, pygame.K_x,\n pygame.K_y, pygame.K_z\n ]\n keep_going = True\n\n while keep_going:\n for event in pygame.event.get():\n if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key in char_keys:\n player += str(chr(event.key))\n if event.key == pygame.K_BACKSPACE:\n text = self.FONT.render(f\"{suffix_text} Enter your name: {player}\", True, c.BLACK)\n display.blit(text, (self.SCORE_BOARD_X * 2, self.SCORE_BOARD_Y / 3))\n player = player[:-1]\n # TODO: Remove constant\n if event.key == pygame.K_RETURN or len(player) > 15:\n keep_going = False\n\n text = self.FONT.render(f\"{suffix_text} Enter your name: {player}\", True, c.WHITE)\n display.blit(text, (self.SCORE_BOARD_X * 2, self.SCORE_BOARD_Y / 3))\n pygame.display.update()\n\n return player", "def create_player():\n\n\t#TODO : Ajout d'une BDD des différents joueurs avec des scores et vérifier la présence des joueurs choisis dans cette BDD pour charger les scores\n\n\tactivator = ''\n\tinhibitor = ''\n\n\tprint(\"\\nEntrez le pseudo du joueur\",colors.GREEN + \"'Activator' : \" + colors.STOP, end = \"\")\n\tactivator = input()\n\n\tprint(\"\\nEntrez le pseudo du joueur\", colors.RED + \"'Inhibitor' : \"+colors.STOP, end = \"\")\n\tinhibitor = input()\n\n\t# Default usernames if not defined by users\n\tif len(activator) == 0:\n\t\tactivator = 'Activator'\n\n\tif len(inhibitor) == 0:\n\t\tinhibitor = 'Inhibitor'\n\n\t# Attribute to each player the status he chose\n\tData.current_player['Activator'] = activator\n\tData.current_player['Inhibitor'] = inhibitor\n\n\treturn activator, inhibitor", "def get_game_ready():\n\tnum_players = int(input(\"\"\"How many players will be playing today? 
(between 2 and 5): \"\"\"))\n\twhile num_players > 5 or num_players < 2:\n\t\tnum_players = int(input(\"\"\"Between 2 and 5 players please: \"\"\"))\n\tnum_number_of_people = int(input(\"\"\"How many of these players will be humans?: \"\"\"))\n\twhile num_number_of_people > num_players or num_number_of_people < 0:\n\t\tnum_number_of_people = int(input(f\"\"\"Please enter a number equal to or less than the number of players ({num_players}): \"\"\"))\n\tnum_people = num_number_of_people\n\twhile num_people > 0:\n\t\tNAMES[abs(num_people - num_number_of_people)] = input(f\"\"\"Name of player {abs(num_people - num_number_of_people)+1}: \"\"\")\n\t\tnum_people -= 1\n\twhile len(NAMES) > num_players:\n\t\tNAMES.pop()\n\treturn NAMES", "def __add_players(self):\n players_list = []\n players_list.extend([(\"NEW PLAYER\", \"**new**\")])\n players_list.extend(self._roster.get_roster())\n players_list.extend([(\"BACK TO MENU\", \"**menu**\")])\n\n players = [\n inquirer.List(\n 'selection',\n message=\"ADD/REMOVE (Use ↑ and ↓ to select, ENTER to confirm)\",\n choices=players_list,\n default=\"NEW PLAYER\",\n carousel=True)\n ]\n\n self.clear_screen()\n self.__print_logo()\n selection = inquirer.prompt(players)['selection']\n\n if selection == \"**menu**\":\n pass\n elif selection == \"**new**\":\n name = self.__prompt_name()\n if name:\n self._roster.add_player(name)\n else:\n delete = inquirer.confirm(\n f\"Do you want to remove '{selection}'?\", default=True\n )\n if delete:\n self._roster.remove_player(selection)\n input(f\"'{selection}' removed. Press ENTER to continue.\")", "def inform_players(list_of_players):\n for player in list_of_players:\n player.show_cards_beginning()\n input(\"Press enter to pass your turn\")\n print()", "def test_get_player_names(self):\n INPUT.side_effect = ['A', 'M', 'Z', '']\n names = game.pig.get_player_names()\n self.assertEqual(names, ['A', 'M', 'Z'])", "def add_player(self):\n title = \"Bienvenue dans le gestionnaire de tournois d'échec.\\nAjout d'un joueur\"\n subtitle = \"Saisir dans l'ordre :\\n\"\n\n menu = {1: ('', \"Nom du joueur\"),\n 2: ('', \"Prénom du joueur\"),\n 3: ('', \"Date de naissance (Format dd/mm/aaaa)\"),\n 4: ('', \"Sexe (H/F)\")}\n\n self.view_menu.display_menu(title=title, subtitle=subtitle, question=menu)\n\n choice = ('name', 'first_name', 'dob', '_genre')\n response = []\n\n for i in range(4):\n if 0 <= i <= 1: # pour les question nom et prénom\n\n valid = self.ask_and_store_text(menu[i + 1][1] + ' : ')\n while not valid[0]:\n valid = self.ask_and_store_text(menu[i + 1][1] + ' : ')\n response.append(valid[1])\n\n elif i == 2: # pour la date de naissance\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n while not self._control_user_input(\"dob\", valid):\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n response.append(valid)\n\n elif i == 3: # pour la saisie du genre\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n while not self._control_user_input(\"_genre\", valid):\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n response.append(valid)\n\n res = dict(zip(choice, response))\n Player(**res)\n Player.save_all_players()\n self.menu_players()", "def __prompt_name(self):\n self.clear_screen()\n self.__print_logo()\n\n name = input(\"[!] 
Enter new player name and press ENTER:\\n\\n \")\n if not (2 < len(name) < 16):\n self.clear_screen()\n self.__print_logo()\n print(\"Username must be between 3 and 15 characters.\")\n input(\"Press ENTER to return to player menu.\")\n elif name in self._roster.get_roster():\n self.clear_screen()\n self.__print_logo()\n print(\"Player already exists.\")\n input(\"Press ENTER to return to player menu.\")\n else:\n return name", "def create_number_of_players(self):\n self.number_of_players = pyip.inputInt(\n prompt='\\nEnter number of players (1 to 4):\\n', min=1, max=4)", "def collect_players_and_suspects_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded, suspects_decoded", "def set_players():\n \n while True:\n players = eval(input(\"Geben Sie die Anzahl Spieler an oder tippe '0' zum Abbruch: \"))\n if int(players) > 0:\n break\n elif int(players) == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine positive Ganzzahl eingeben!\")\n print()\n print()\n return players", "def __init__(self):\n player_1 = input('Player 1, Enter your name: ')\n player_2 = input('Player 2, Enter your name: ')\n self.__fields = [Field(), Field()]\n self.__players = [Player(player_1), Player(player_2)]\n self.__current_player = 0\n self.__next_player = 1", "def throwing_proposition(list_of_players):\n list_of_players_name = [player.name.lower() for player in list_of_players]\n while True:\n print(\"\\nIf you want to throw a card over {},\".format(Card.rejected_cards[-1].name))\n print(\"Write your name and press enter\")\n print(\"If everyone's done enter 'END' to continue the round\")\n user_entry = input().lower()\n if user_entry == 'end':\n break\n if user_entry in list_of_players_name:\n get_player_index = list_of_players_name.index(user_entry)\n player = list_of_players[get_player_index]\n player.throw()\n print(\"Anyone else?\")\n else:\n print(\"Incorrect entry\")", "def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)", "def display_tournament_player_list(self):\r\n tournament_name = self.input_name(\"nom du tournoi\")\r\n tournament = tournaments_table.get(Query().Nom == tournament_name)\r\n player_list = list()\r\n for rated_player in tournament['Classement']:\r\n player_list.append(players_table.get(doc_id=rated_player[0]))\r\n user_choice = self.input_user_choice_sorting()\r\n print(\"Liste de tous les joueurs du tournoi de\", tournament_name, \": \")\r\n if user_choice == '1':\r\n player_list.sort(key=lambda x: x['Nom'])\r\n for player in player_list:\r\n print(player)\r\n elif user_choice == '2':\r\n player_list.sort(reverse=True, key=lambda x: x['ELO'])\r\n for player in player_list:\r\n print(player)", "def createPlayers():\r\n while True:\r\n try:\r\n num_players = abs(int(raw_input(\"How many players?: \")))\r\n if num_players == 0:\r\n raise ValueError\r\n break\r\n except KeyboardInterrupt:\r\n raise KeyboardInterrupt\r\n except ValueError:\r\n print \"Invalid input\"\r\n\r\n players = {}\r\n for player_key in xrange(1,num_players+1):\r\n players[player_key] = players.get(player_key,0)\r\n return players", "def create_players():\n\n char_pairings = {\"X\":\"O\",\"O\":\"X\"}\n\n # Create player1\n name_1 = input(\"Player 1, what is your name? > \")\n char_1 = \"\"\n \n # Force player to choose valid input\n while char_1 not in char_pairings:\n char_1 = input(\"Would you like to be X or O? > \").upper()\n player_1 = Player(name_1, char_1)\n\n # Create player2\n name_2 = input(\"Player 2, what is your name? > \")\n\n print(\"{}, you are {}.\".format(name_2, char_pairings[char_1]))\n char_2 = char_pairings[char_1]\n\n player_2 = Player(name_2, char_2)\n\n return (player_1, player_2)", "def names():\n return render_template('playernames.html')", "def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass", "def player_input():\n x_o = ['X', 'O']\n player = \"\"\n while True:\n player = input('Choose your player X or O: ')\n if player.upper() in x_o:\n break\n else:\n print('It is neither X nor O! 
Choose again:')\n player = player.upper()\n print(f\"Your player is {player}\")\n return player", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id", "def get_participating_players(raw_input=raw_input):\n no_players = 0\n while no_players != 1 and no_players != 2:\n inp = raw_input(\"Single player or multiplayer? (1/2): \")\n try:\n no_players = int(inp)\n except ValueError:\n print \"Invalid input - please try again\"\n pass\n\n if no_players is 1:\n return (HumanPlayer('X'), ComputerPlayer('O'))\n else:\n return (HumanPlayer('X'), HumanPlayer('O'))", "def name_choice():\n username=\"\"\n while username ==\"\": #if value of user is empty the loop continue\n username=input(\"please enter your name or pseudo\")\n return username\n print(\"welcome on roulette game \",username,\" !\")", "def describe_game(name):\n #meaning ,if we do not already have this user's name,\n #then they are a new player and we need to get their name\n if name !=\"\":\n print(\"\\n tahnk you for playing again, {} !\".format(name))\n else:\n stop=True\n while stop:\n if name==\"\":\n name=input(\"\\n What is your name? \\n >>>\").capitalize()\n if name!=\"\":\n print(\"\\n Welcome, {}!\".format(name))\n print(\"\\n In this game, you will be greeted \\n by several different people. \\n You can choose to be nice or mean\")\n print(\"but at the end of the game your fate \\n will be sealed by your actions\")\n stop=False\n return name", "def find_player(argin, argtype, host='localhost', root='root', password=''):\r\n con = pymysql.connect(host, root, password)\r\n results = []\r\n\r\n with con.cursor() as cur:\r\n cur.execute(f\"\"\"USE {DB_NAME};\"\"\")\r\n\r\n for inp in argin:\r\n if argtype == \"name\":\r\n if len(inp.split()) == 1:\r\n cur.execute(f\"\"\"SELECT * FROM players WHERE last_name LIKE '%{inp}%'\"\"\")\r\n else:\r\n first_name = inp.split()[0]\r\n last_name = inp.split()[-1]\r\n cur.execute(f\"\"\"SELECT * FROM players WHERE\r\n first_name LIKE '%{first_name}%' AND\r\n last_name LIKE '%{last_name}%'\"\"\")\r\n else:\r\n cur.execute(f\"\"\"SELECT * FROM players WHERE {argtype}='{inp}'\"\"\")\r\n\r\n result = cur.fetchall()\r\n if result:\r\n results.append(result)\r\n\r\n else:\r\n first, last = add_player(inp, argtype, host, root, password)\r\n con.commit()\r\n cur.execute(f\"\"\"SELECT * FROM players WHERE\r\n first_name='{first}' AND\r\n last_name='{last}'\"\"\")\r\n result = cur.fetchall()\r\n if result:\r\n results.append(result)\r\n else:\r\n logger.info(f\"{inp} was not found on site.\")\r\n\r\n con.close()\r\n return results", "def greeting(players_name):\n print \"\\nGreat! Welcome, \" + players_name + \". 
The purpose of this game is to fill in the blanks for all the sentences provided.\"", "def player(self, name):\n\n self.name = name\n q = Query()\n data = TinyDB('app/data/db_player.json').table('players')\n\n self.search_result = data.search(\n (q.name == self.name) |\n (q.surname == self.name)\n )\n\n if len(self.search_result) == 0:\n v_menu.View().search('player_none')\n return 'None'\n\n elif len(self.search_result) == 1:\n v_menu.View().search_players(\n 'find_player',\n self.search_result[0]['name'],\n self.search_result[0]['surname'],\n self.search_result[0]['birthday'],\n self.search_result[0]['rank']\n )\n return self.search_result[0]['id']\n\n elif len(self.search_result) >= 2:\n for i in range(len(self.search_result)):\n v_menu.View().search_players(\n 'find_players',\n self.search_result[i]['name'],\n self.search_result[i]['surname'],\n self.search_result[i]['birthday'],\n self.search_result[i]['rank'], i+1\n )\n\n self.player_number = c_input.Input().select_menu_number(\n len(self.search_result))\n\n return self.search_result[self.player_number-1]['id']", "def createPlayers(self, board):\n tempVar = 0\n while tempVar == 0:\n try:\n numPlayers = int(input(\"How many players do you want to have?\\n\"))\n if numPlayers >= 2 and numPlayers <= 6:\n tempVar = 1\n else:\n print(\"Thats not the right amount of players please have 2-6 players.\")\n except:\n print(\"That number wont work please enter a number between 2 and 6.\")\n\n for i in range(numPlayers):\n tempState = False\n os.system('clear')\n name = input(\"What is the name of player \" + str(i + 1) + \"?\\n\")\n if i == 0:\n player1 = playerClass.player(board, name)\n while tempState == False:\n print(player1, \"has an initial roll of\", player1.getIR(), \"\\n\")\n time.sleep(0.1)\n tempState = self.addPlayers(player1)\n if tempState == False:\n print(\"Thats a tie. Rerolling ...\\n\")\n time.sleep(0.5)\n os.system('clear')\n else:\n input(\"Press enter to continue\")\n elif i == 1:\n player2 = playerClass.player(board, name)\n while tempState == False:\n print(player2, \"has an initial roll of\", player2.getIR(), \"\\n\")\n time.sleep(0.1)\n tempState = self.addPlayers(player2)\n if tempState == False:\n print(\"Thats a tie. Rerolling ...\\n\")\n time.sleep(0.5)\n os.system('clear')\n else:\n input(\"Press enter to continue\")\n elif i == 2:\n player3 = playerClass.player(board, name)\n while tempState == False:\n print(player3, \"has an initial roll of\", player3.getIR(), \"\\n\")\n time.sleep(0.1)\n tempState = self.addPlayers(player3)\n if tempState == False:\n print(\"Thats a tie. Rerolling ...\\n\")\n time.sleep(0.5)\n os.system('clear')\n else:\n input(\"Press enter to continue\")\n elif i == 3:\n player4 = playerClass.player(board, name)\n while tempState == False:\n print(player4, \"has an initial roll of\", player4.getIR(), \"\\n\")\n time.sleep(0.1)\n tempState = self.addPlayers(player4)\n if tempState == False:\n print(\"Thats a tie. Rerolling ...\\n\")\n time.sleep(0.5)\n os.system('clear')\n else:\n input(\"Press enter to continue\")\n elif i == 4:\n player5 = playerClass.player(board, name)\n while tempState == False:\n print(player5, \"has an initial roll of\", player5.getIR(), \"\\n\")\n time.sleep(0.1)\n tempState = self.addPlayers(player5)\n if tempState == False:\n print(\"Thats a tie. 
Rerolling ...\\n\")\n time.sleep(0.5)\n os.system('clear')\n else:\n input(\"Press enter to continue\")\n elif i == 5:\n player6 = playerClass.player(board, name)\n while tempState == False:\n print(player6, \"has an initial roll of\", player6.getIR(), \"\\n\")\n time.sleep(0.1)\n tempState = self.addPlayers(player6)\n if tempState == False:\n print(\"Thats a tie. Rerolling ...\\n\")\n time.sleep(0.5)\n os.system('clear')\n else:\n input(\"Press enter to continue\")", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def handlenames():\n\n while True:\n name = input(\"Please enter full name or list> \")\n if name == \"\":\n print(\"Please enter valid name.\\n\")\n elif name == \"list\":\n printdonorlist()\n break\n elif name in donor_db:\n getdonationamount(name)\n printthankyou(name)\n break\n else:\n addnewdonordonation(name)\n getdonationamount(name)\n printthankyou(name)\n break", "def enter_game_played(self, players_names, winners_names, game, date, group):\n try:\n game_played = GamePlayed()\n game_played.game = Game.objects.get(name__exact=game)\n game_played.date = date\n game_played.group = group\n game_played.save()\n\n for player in players_names:\n game_played.players.add(Player.objects.get(user__first_name__exact=player))\n for winner in winners_names:\n game_played.winners.add(Player.objects.get(user__first_name__exact=winner))\n except:\n print(\"Error entering game\", game)\n pass", "def names_interaction():\n already_printed = []\n for protocol in protocols:\n for account in protocol.accounts:\n for contact in account.contacts:\n for message in contact.messages:\n if message.name not in already_printed:\n already_printed.append(message.name)\n print(message.name)\n nicks = input(\"Own nicks, comma separated: \")\n nicks = nicks.split(\",\")\n nicks = [nick.strip() for nick in nicks]\n return nicks", "def greet() -> None:\n global player\n player = str(input(\"Enter player's name: \"))\n print(f\"Hello {player}!! Let's play rock-paper-scissors! When prompted, enter 'rock', 'paper', or 'scissors'. If you beat me, you get a point. If I beat you, I get a point. First to receive 3 points wins the game. Ready? 
Let's go!\")", "def create_players_list(self):\n for p in self.players_names:\n self._players_list.append(Player(p))", "def search_player_by_name(players_table, name):\r\n result = players_table.search(Query().Nom == name)\r\n print(result)", "def init_players(self):\n complain = \"\"\n players_turn = random.sample(range(self.n_players), self.n_players)\n players_created = {}\n picked_colors = []\n for x in range(self.n_players):\n while True:\n clear_output()\n try:\n color = input(\n f\"{complain}Player {x+1}, please type in one of the following colors: ({', '.join([x.capitalize() for x in self.world.player_colors if x not in picked_colors])}):\\n\").lower()\n if color in self.world.player_colors and color not in picked_colors:\n picked_colors.append(color)\n players_created[players_turn[x]] = Player(\n color.capitalize(), self.start_troops)\n break\n else:\n complain = \"Please enter a valid color\\n\"\n except:\n pass\n\n self.players = [players_created[y] for x in range(\n self.n_players) for y in players_created.keys() if int(y) == x]", "def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def make_player_list(player_arg):\n\n players = []\n\n names_pieces = player_arg.split(',')\n for name_piece in names_pieces:\n player_name, piece_name = name_piece.split(':')\n piece = PIECE_MAP[piece_name]\n players.append(player.Player(player_name, piece))\n\n return players", "def nflplayers(self, irc, msg, args, optplayer):\n \n db_filename = self.registryValue('nflPlayersDb')\n \n if not os.path.exists(db_filename):\n self.log.error(\"ERROR: I could not find: %s\" % db_filename)\n return\n \n db = sqlite3.connect(db_filename)\n cursor = db.cursor()\n \n optplayer = optplayer.lower().strip()\n\n #cursor.execute(\"select id from players where name='?'\", ([optplayer]))\n \n query = \"select id, name from players WHERE name LIKE '%%%s%%'\" % optplayer\n cursor.execute(query)\n \n rows = cursor.fetchall()\n \n if len(rows) < 1:\n irc.reply(\"I did not find anything matching: %s\" % optplayer)\n return\n else:\n results = string.join([str(item[1]) + \" (\" + str(item[0]) + \")\" for item in rows], \" | \")\n output = \"I found {0} results for: {1} :: {2}\".format(len(rows), optplayer, results)\n irc.reply(output)", "def test_get_player_names_stdout(self):\n INPUT.side_effect = ['A', 'B', '']\n game.pig.get_player_names()\n INPUT.assert_has_calls([\n mock.call(\"Player 1's name: \"),\n mock.call(\"Player 2's name: \"),\n mock.call(\"Player 3's name: \")\n ])", "async def _players(self, ctx: Context):\n\n guild = ctx.guild\n\n player_role = await self.role_from_config(guild, \"player_id\")\n\n players = [\n user.mention for user in guild.members if player_role in user.roles\n ]\n\n title = _(\"Total Players: {}\").format(len(players))\n txt = \"\\n\".join(players)\n\n embed = discord.Embed(\n colour=player_role.color, title=title, description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"I need embed permissions for this command.\")", "def update_players_name(self, player_1, player_2):\n self.model.player_1 = player_1\n 
self.model.player_2 = player_2\n self.logger.info(\"User_1 has name %s, user_2 has name %s\", player_1, player_2)", "def add_player(players):\n markers = [player.mark for player in players]\n while True:\n init_args = Player.prompt_init()\n # Check valid player.\n if init_args['mark'] in markers:\n print(\"Sorry someone already chose marker {}, please choose another.\".format(\n init_args['mark']))\n continue\n # Exit if valid player.\n break\n # If we get here then we have a valid player to add.\n return Player(**init_args)", "def all():\n lab = test_loading()\n\n for _ in range(1):\n print('🦅🐀🐙')\n\n test_spawn(lab)\n\n pc = test_spawn_player(lab)\n\n while True:\n pc.store_move(PlayerMove(random.choice(['walk left', 'walk up', 'walk down', 'walk right'])))\n test_turn_ai_and_players(lab)\n if input() == '0':\n break", "def set_plays(self) -> None:\n player1 = self._get_input('What is the name of player 1?')\n player2 = self._get_input('What is the name of player 2?')\n self.state = State(player1, player2)", "def create_players(tournament):\n enter_player = 1\n while enter_player <= 8:\n player_list = select_players()\n selection = view.menu_create_player(player_list)\n if selection == 1:\n # ---------------------------------------------------------------------------------\n # Joueur existant\n view.print_actors(player_list)\n data = create_existing_player()\n player_db = Player(data['Nom'], data['Prenom'], data['Date de Naissance'],\n data['Sexe'], data['Classement'])\n # add the player id to the list of tournament players_id\n tournament.players_index_list.append(data.doc_id)\n # add the player to the list of tournament players with the tournament method\n tournament.add_player(player_db)\n # ---------------------------------------------------------------------------------\n elif selection == 2:\n # ---------------------------------------------------------------------------------\n # Nouveau joueur\n data = view.create_player_view(enter_player)\n player = Player(data['name'], data['first_name'], data['birth_day'], data['sex'],\n data['ranking'])\n # add the player to the list of tournament players with the tournament method\n tournament.add_player(player)\n # players are registered in the database\n save_player(player)\n # prendre l'identifiantiant du joueur\n for id_player in get_players_id(1):\n tournament.players_index_list.append(id_player)\n # ---------------------------------------------------------------------------------\n enter_player += 1", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names", "def getName():\n\n tcflush(sys.stdin, TCIFLUSH)\n name = input(\" You say:\\n \")\n updateNameDatabase(name)\n return name", "def show_bench_player(self):\n if (len(self.bench_players) == 0):\n print(\"The bench is empty.\")\n else:\n for i in range(len(self.bench_players)):\n print(self.bench_players[i].name)", "def build_player_data():\n names = [\"Gunther O'Brian\",\n 'Workman Gloom',\n 'Esme Ramsey',\n 'Cornelius Games',\n 'Kline Greenlemon',\n 'Hotbox Sato',\n 'Famous Owens',\n 'Jenkins Good']\n nums = [77, 31, 37, 6, 14, 53, 7, 64]\n avgs = [0.40666, 0.118451, 0.400093, 0.335117,\n 0.425694, 0.353378, 0.179842, 0.246856]\n\n return names, nums, avgs", "def dogs():\r\n print(\"Please give 3 dogs you want to going hunting with names\")\r\n for x in range(3):\r\n d = str(input())\r\n x += 1\r\n print(\"dog name:\" + d)\r\n print(\"What wonderful choices of dogs\")", "def StrawPicker():\n names_list = []\n 
print(\"Welcome to the straw picker. Start entering your names. Type 'done' once you're finished.\")\n while True:\n names = input(f\"Enter your name #{len(names_list)}: \")\n if QuBa(names):\n return\n if len(names) < 1:\n print(\"Enter name with more than 2 alphabets.\")\n continue\n if names == 'done':\n break\n names_list.append(names)\n while True:\n print(\"The one with the shortest straw is.. \" + choice(names_list) + \"!\")\n yn = input(\"Roll again? \").lower()\n if yn.startswith('y'):\n continue\n elif yn.startswith('n'):\n yn = input(\"Enter again? \")\n if yn.startswith('n') or QuBa(yn):\n print(\"OK. Bye!\")\n return\n elif yn.startswith('y'):\n StrawPicker()", "def registerPlayer(name):\n # cn=name\n # title='playerName'\n # data=[title,cn]\n DB = connect()\n c = DB.cursor()\n #cur.execute(\"INSERT INTO test (num, data) VALUES (%s, %s)\",*/\n #c.execute(\"INSERT INTO tournament (playerName) values ('al pachino2') \")\n #c.execute(\"INSERT INTO tournament name values (%s)\", name)\n #cur.execute('INSERT INTO %s (day, elapsed_time, net_time, length, average_speed, geometry) VALUES (%s, %s, %s, %s, %s, %s)', (escaped_name, day, ))\n c.execute(\"INSERT INTO tournament VALUES (%s)\", (name,))\n DB.commit()\n DB.close()", "def test_set_player_names(self):\n\n # Setup new games and attempt to set their players' names\n valid_players = [\n [\"Bob\", \"Sam\", \"Cal\", \"Kris\"],\n [\"Player 1\", \"Player 2\", \"Player 3\", \"Player 4\", \"Player 5\"],\n [\"Bot\"],\n [\"P1\", \"P2\", \"P3\"],\n ]\n for players in valid_players:\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(len(players))\n game.set_player_names(players)\n self.assertEqual(game.player_names, players, msg=\"The game's player names were not correctly set with: \" + str(players))\n\n # Make sure that the new game state is corectly set\n self.assertEqual(game.state.name, \"start_game\", msg=\"The game's state was not correctly set after setting the player names.\")\n\n # Try to set invalid players\n invalid_players = [\n None,\n [None, None],\n [123, 456, 789],\n [\"Bob\", \"Sam\", 123],\n [\"John\", \"\"],\n ]\n for players in invalid_players:\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(len(players or \"1\"))\n success = False\n try:\n game.set_player_names(players)\n except InvalidGamePlayerNames:\n success = True\n self.assertTrue(success, msg=\"The following invalid series of player names was able to be set: \" + str(players))\n\n # Test the case where the number of players given is not the same as the number of names given\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(2)\n success = False\n try:\n game.set_player_names([\"P1\", \"P2\", \"P3\"])\n except InvalidGamePlayerNames:\n success = True\n self.assertTrue(success, msg=\"A number of player names unequal to the number to the number of players in the game was able to be set.\")\n\n # Try to reset the names of the players to throw an error\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(3)\n game.set_player_names([\"P1\", \"P2\", \"P3\"])\n success = False\n try:\n game.set_player_names([\"P01\", \"P02\", \"P03\"])\n except InvalidGameMethodOrder:\n success = True\n self.assertTrue(success, msg=\"The names of the players was incorrectly able to be reset.\")", "def 
display_player(cls, player, title=False):\n menu = \"-\".center(cls.MAX_LENGTH, '-') + \"\\n\"\n if title:\n menu += str(title).center(cls.MAX_LENGTH, '-') + \"\\n\\n\"\n menu += \"Last Name\".ljust(cls.NAME_LENGTH) + \"First Name\".ljust(cls.NAME_LENGTH)\n menu += \"Gender\".ljust(cls.GENDER_LENGTH) + \"Date Birth\".ljust(cls.DATE_LENGTH)\n menu += \"Elo\".ljust(cls.ELO_LENGTH) + \"\\n\"\n if isinstance(player, list):\n s = menu\n for instance_player in player:\n s += instance_player.last_name.ljust(cls.NAME_LENGTH)\n s += instance_player.first_name.ljust(cls.NAME_LENGTH)\n s += instance_player.gender.ljust(cls.GENDER_LENGTH)\n s += instance_player.date_birth.ljust(cls.DATE_LENGTH)\n s += instance_player.ranking.ljust(cls.ELO_LENGTH) + \"\\n\"\n print(s)\n else:\n s = menu\n s += player.last_name.ljust(cls.NAME_LENGTH)\n s += player.first_name.ljust(cls.NAME_LENGTH)\n s += player.gender.ljust(cls.GENDER_LENGTH)\n s += player.date_birth.ljust(cls.DATE_LENGTH)\n s += player.ranking.ljust(cls.ELO_LENGTH) + \"\\n\"\n print(s)", "async def whoplays(self, ctx, *, game):\r\n if len(game) <= 2:\r\n await self.bot.say(\"You need at least 3 characters.\")\r\n return \r\n \r\n server = ctx.message.server\r\n members = server.members\r\n\r\n playing_game = \"\"\r\n for member in members:\r\n if member != None and member.game != None and member.game.name != None and not member.bot:\r\n if game.lower() in member.game.name.lower():\r\n playing_game += \"+ {} ({})\\n\".format(member.name, member.game.name) \r\n\r\n if playing_game == \"\":\r\n await self.bot.say(\"No one is playing that game.\")\r\n else:\r\n msg = \"```python\\n\"\r\n msg += \"These are the people who are playing {}: \\n\".format(game)\r\n msg += playing_game\r\n msg += \"```\" \r\n await self.bot.say(msg)", "def create_existing_player():\n logic_test = True\n data = \"\"\n while logic_test:\n try:\n player_choice = view.select_player_view(select_players())\n data = select_players()[player_choice]\n logic_test = False\n except IndexError as error:\n view.show(error)\n continue\n return data", "def Play():\n\tticTacToeGames = []\n\twhile True:\n\t\tgameName = input('Shall we play a game? ')\n\t\tif gameName == 'TicTacToe':\n\t\t\tnumPlayers = int(input('How many human players, Professor? '))\n\t\t\tgame = PlayTicTacToe(numPlayers)\n\t\t\tticTacToeGames.append(game)\n\n\t\telif gameName == 'Save':\n\t\t\tSaveListInFile(ticTacToeGames)\n\t\telif gameName == 'Load':\n\t\t\tLoadListFromFile()\n\t\telif gameName == 'Matchboxes':\n\t\t\tPrintMatchboxes()\n\t\telif gameName == 'Learn':\n\t\t\tnumGames = int(input('How many games, Professsor? '))\n\t\t\tfor _ in range(numGames):\n\t\t\t\tticTacToeGames.append(PlayTicTacToe(0))\n\t\telif gameName == \"No\":\n\t\t\tprint ('Good-Bye, Professor')\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"I don't know how to play {}\".format(gameName))\n\treturn", "def word_cloud_from_user_input(self):\n\n getting_name = True\n\n print(\"Please enter username and press enter:\\n\")\n\n while getting_name:\n username = input()\n\n redditor = self(username)\n\n if not redditor.username:\n print(\"Redditor does not exist. 
Please enter again.\\n\")\n continue\n\n break\n\n redditor.show_word_cloud()", "def fortune():\n username = raw_input('what is your name?')\n fortuner = ['you will not live as long as you think', 'happiness will soon find your mom', 'things arent so great', 'a wish that you made in the past is prolly not gonna happen', 'you will be greeted with a gift in the near,...aww,...nvm', 'the sky will fall on you tomorrow, but we all knew that']\n print fortuner[(len(username)-1)%len(fortuner)]", "def add_new_player(self) -> None:\n\n # 1\n for elem in self.data:\n key = ''\n value = ''\n for k, v in elem.items():\n if k == 'name':\n key = v\n else:\n value = v.get()\n self.attributs.update({key: value})\n\n # 2\n order = ct.Controls.verify_players_creation(self.attributs)\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(self.master.master.list_instances_menus_tournament)\n # 3\n if order['order'] == 'repeat_step':\n self.display()\n else:\n self.destroy_window()\n self.master.master.launch()", "def get_name(self):\n name = input(\"What is your name? \")\n if len(name) > 0:\n self.name = name", "def ask_user():\r\n while True:\r\n if bj.player1.double_down is True and bj.player1.split is True and bj.player1.went_split is False:\r\n p_choice = input(\"Hit, Stand, Double Down or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.split is True and bj.player1.went_split is False: # various input prompts depending on available player choices\r\n p_choice = input(\"Hit, Stand or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.double_down is True:\r\n p_choice = input(\"Hit, Stand or Double Down?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n else:\r\n p_choice = input(\"Hit or Stand?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice", "def get_players():\n return {\"X\": play_human, \"O\": play_ai}", "def option2(movies):\r\n playerName = input ('Please select an actor:\\n')\r\n flag = 0\r\n for k in movies.keys():\r\n #if the player is in the dictionary's keys\r\n if playerName in movies[k]:\r\n flag = 1\r\n if flag == 0:\r\n print(\"Error\")\r\n return\r\n otherPlayers = []\r\n otherPlayers = set(otherPlayers)\r\n for x in movies.keys():\r\n #the condition verify if the actor is one of the values\r\n if playerName in movies[x]:\r\n otherPlayers = set(otherPlayers | movies[x])\r\n otherPlayers.remove(playerName)\r\n #sorting according the 'abc' serial\r\n otherPlayers = sorted(otherPlayers)\r\n #if they are no actors in the group\r\n if not otherPlayers:\r\n print(\"There are no actors in this group\\n\")\r\n return \r\n print(', '.join(otherPlayers)) \r\n return", "def add_player(inp_to_add, type_to_add, host, root, password):\r\n detail_dict = {}\r\n\r\n if type_to_add == \"url\":\r\n player_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n player_site = inp_to_add\r\n 
else:\r\n player_soup, player_site = get_first_search_result(\r\n SOCCER_URL + \"/search/players/?q=\" + inp_to_add, player=1)\r\n\r\n if player_soup:\r\n\r\n passport = player_soup.find('div', class_=\"block_player_passport real-content clearfix\")\r\n\r\n if passport:\r\n details = passport.find_all('dt')\r\n results = passport.find_all('dd')\r\n\r\n detail_dict = {}\r\n for i in range(len(details)):\r\n detail_dict[details[i].text] = results[i].text\r\n\r\n league_url = SOCCER_URL + player_soup.find('table', class_=\"playerstats career sortable table\")\\\r\n .tbody.tr.find('td', class_=\"competition\").a[\"href\"]\r\n find_league({league_url}, \"url\", host, root, password)\r\n\r\n return detail_dict[\"First name\"], detail_dict[\"Last name\"] # Return first and last name as in DB\r", "def greet_player(name):\n\t\n\tprint \"How are are you doing %s?\" % name", "def getPlayerListFromGUI(self):\n playerlist = []\n # Tried to be cheeky and only have this called on initialization, but this made adding / removing to player list in real time impossible\n # Get input list of target players\n src = \"./data/player_list.txt\"\n txt = open(src, \"r\", encoding=\"utf8\")\n\n for aline in txt:\n values = aline.strip(\"\\n\").split(\",\")\n playerlist.append(values)\n txt.close()\n\n return playerlist", "def load_player():\n print(\"Who is playing? \\n\")\n player_name = input('>')\n print()\n with shelve.open('myfile') as loadfile:\n try:\n user = loadfile[player_name]\n except KeyError:\n return no_name()\n return user", "def playerinput(kleuren):\r\n vierhidden = []\r\n i = 0\r\n try:\r\n while i < 4:\r\n kleur1, kleur2, kleur3, kleur4 = input('Geef jouw combinatie: ').split()\r\n kleurcombinatie = kleur1, kleur2, kleur3, kleur4\r\n for kleur in kleurcombinatie:\r\n if kleur not in kleuren:\r\n print('Kies een van de kleuren uit de lijst.')\r\n else:\r\n vierhidden.append(kleur)\r\n i += 1\r\n except:\r\n print('Geef 4 kleuren uit de lijst in 1 regel, met spatie en kleine letters')\r\n return playerinput(kleurenlijst)\r\n return vierhidden", "def greet_users(names):\n for name in names:\n msg = \"Hello, \" + name.title() + \"!\"\n print(msg)", "def greet_users(names):\n for name in names:\n msg = \"Hello, \" + name.title() + \"!\"\n print(msg)", "def tell_all_players(message):\n print(\"tell_all_players - disabled\")\n #player_obj_list = leetcoin_client.getPlayerObjList()\n #for player_obj in player_obj_list:\n # #print(\"player_obj key: %s\" player_obj.get_key())\n # print(player_obj.get_userid())\n # \n # playerinfo = playerinfo_from_userid(player_obj.get_userid())\n # \n # i = index_from_playerinfo(playerinfo)\n # m = HintText(index=i, chat=1, message=message)\n # m.send(i)", "def check_player_name(words):\n\n # check for a player by looking at all the words as a string then working backwards.\n for x in reversed(range(len(words) + 1)):\n\n name = words[:x]\n remaining = words[x:]\n\n if name == []:\n continue\n\n player = nhl_players.get_player(_make_name(name))\n if player:\n # print (\"player hit %s\" % player )\n return player, remaining\n\n return None, words", "def get_command(self):\n if len(Player.table.all()) < 8:\n return NewTournamentCommand(None)\n self.tournament_data = {\n \"name\": input(\"Title of the tournament:\"),\n \"location\": input(\"Location:\"),\n \"date_start\": input(\"Start date (yyyy/mm/dd):\"),\n \"date_end\": input(\"End date (yyyy/mm/dd):\"),\n \"time_control\": input(\"Time control:\"),\n \"description\": input(\"Description:\"),\n }\n 
self.check_data_tournement()\n players = []\n while len(players) < 8:\n print(\"Choose a player from the list:\")\n Player.print_all()\n id = input(\"Player's id:\")\n if id.isdigit():\n players.add[Player.get(id)]\n return NewTournamentCommand(self.tournament_data)", "def registerPlayer(name):\n\n if len(name) < 1:\n print \"Player not registered. Invalid name or no name given.\"\n else:\n query = \"INSERT INTO players (name) VALUES (%s)\"\n values = (name,)\n results = executeQuery({\n 'dbname': 'tournament', \n 'query' : query, \n 'type' : 'insert', \n 'values' : values\n })", "def topics_from_user_input(self):\n\n getting_name = True\n\n print(\"\\nPlease enter username and press enter:\\n\")\n\n while getting_name:\n username = input()\n\n redditor = self(username)\n\n if not redditor.username:\n print(\"Redditor does not exist. Please enter again.\\n\")\n continue\n\n break\n\n redditor.print_topics()", "def input_artists():\n artists = []\n name = \"dummy\" # otherwise loop does not start\n while name:\n name = input(\"\\nPlease enter names of artists to scrape. Press 'Enter' to finish:\\n\\n \")\n artists.append(name)\n\n return artists[:-1] # last artist is empty", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "def greet_user(names):\n\n for name in names:\n msg = f\"Hello, {name.title()} !\"\n print(msg)", "def prompt_player(self):\n board = self.draw_board()\n print board\n self.player_moves(self.board_values)", "def get_user_list(self):\n self.user_list = db.get_user_list()\n for each in self.user_list:\n print each[1] # username\n while(True):\n selection = raw_input(\"Enter username to use\")\n if selection in self.user_list:\n return selection", "def waitForName():\n soundEnterName()\n print(\"Welcome new user.\")\n \n name = \"\"\n while not validUsername(name):\n name = \"\"\n print(\"🤔\")\n while len(name) == 0:\n name = input(\"Enter your username: \")\n name = name.strip()\n name = name.lower() # lowercase the username\n if not validUsername(name):\n print(\"🚫\") # prohibited emoji\n print(\"ERROR: Enter a valid username. No punctuation.\")\n soundError()\n\n\n return name", "def name():\n\treturn input('Masukkan Nama : ')", "def run():\n \n # Enter player name\n #player_name = raw_input(\"Put your Name: \\n \")\n player1 = Player(raw_input(\"Put Player 1 name: \\n \"))\n player2 = Player(raw_input(\"Put Player 2 name: \\n \")) \n \n # Generate Deck\n cards = gen_deck()\n \n game_on = True\n start_pl = 0\n while game_on == True :\n deck = copy(cards) # Cards being played this hand\n deal_cards(deck, player1, player2)\n \n play_set(player1, player2, start_pl) \n\n game_on = check_score(player1, player2, game_on)" ]
[ "0.79158205", "0.785065", "0.7320951", "0.7222905", "0.71919775", "0.71211344", "0.7065533", "0.7033948", "0.69090784", "0.67799306", "0.6779573", "0.6625977", "0.6590808", "0.6588982", "0.65594393", "0.65389705", "0.6485963", "0.648116", "0.64683044", "0.6448013", "0.64186925", "0.6393356", "0.6355518", "0.6336562", "0.63091063", "0.6254228", "0.6217876", "0.62044835", "0.620442", "0.62031066", "0.6174189", "0.61571854", "0.6134621", "0.60989517", "0.608839", "0.6086712", "0.60790014", "0.6064547", "0.6030523", "0.60190046", "0.6011855", "0.5994769", "0.5990174", "0.59501755", "0.5932639", "0.59110934", "0.5906856", "0.5894753", "0.58913475", "0.58892894", "0.58616525", "0.58572507", "0.58552873", "0.58489674", "0.5832671", "0.5800618", "0.5783078", "0.57690465", "0.57392573", "0.57158285", "0.5711604", "0.57050467", "0.5689149", "0.56739867", "0.5663719", "0.5650022", "0.5646743", "0.56437135", "0.5639604", "0.56367075", "0.5627101", "0.56259227", "0.5615969", "0.56081474", "0.55972534", "0.5590488", "0.55882293", "0.5574364", "0.5569068", "0.5562344", "0.5562092", "0.5555282", "0.5554774", "0.5550353", "0.55477214", "0.55452114", "0.55369693", "0.55369693", "0.5535483", "0.5531686", "0.553128", "0.55254817", "0.5520416", "0.55196476", "0.5513114", "0.55124444", "0.55055106", "0.55005115", "0.5497438", "0.54965603", "0.5491523" ]
0.0
-1
Reads a file and returns statistics about the contents.
def analyzeFile(filename):
    fileData = open(filename, encoding="utf-8") # open the file
    counts = {}
    for line in fileData: # iterates over every line of the file
        words = line.split() # turns each line into a list
        for word in words: #iterates over the words in each line list
            word = word.lower().strip(string.whitespace+string.punctuation)
            if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary
                counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary
    #when it gets here for the first line it goes back up to the top and repeats for the 2nd line
    mostCommonWord = [word]
    leastCommonWord = [word]
    shortestWord = [word]
    longestWord = [word]
    for item in counts:
        if counts[mostCommonWord[0]] < counts[item]:
            mostCommonWord = [item]
        elif counts[mostCommonWord[0]] == counts[item]:
            mostCommonWord.append(item)
        if counts[leastCommonWord[0]] > counts[item]:
            leastCommonWord = [item]
        elif counts[leastCommonWord[0]] == counts[item]:
            leastCommonWord.append(item)
        if len(shortestWord[0]) > len(item):
            shortestWord = [item]
        elif len((shortestWord[0])) == len(item):
            shortestWord.append(item)
        if len(longestWord[0]) < len(item):
            longestWord = [item]
        elif len(longestWord[0]) == len(item):
            longestWord.append(item)
    return (mostCommonWord, leastCommonWord, shortestWord, longestWord)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n words = line.split()\n\n for word in words:\n if '/' in word:\n # All processed folder has either a /MoG or /SubSENSE folder. Exploit this to get the filename\n category = os.path.basename(os.path.dirname(os.path.dirname(os.path.normpath(filePath))))\n folder = os.path.basename(os.path.dirname(os.path.normpath(filePath)))\n method = word\n\n # Get the raw FP, TN, etc. count\n folderNumbers = {'TP': words[4], 'FP': words[5], 'FN': words[6], 'TN': words[7],\n 'ErrorShadow': words[8]}\n overviewStats[method] = folderNumbers\n\n\n # CHeck if line is not empty, does not contain certain characters, and that the folder has been found\n if '#' not in line and 'cm' not in line and line and folder and '\\n' != line and method:\n measures = line.split()\n\n isRealMeasure = True\n\n for measure in measures:\n if not RepresentsFloat(measure):\n isRealMeasure = False\n break\n\n\n if len(measures) == 7 and isRealMeasure:\n folderStats = {'recall': measures[0], 'specificity': measures[1], 'FPR': measures[2], 'FNR': measures[3], \n 'PBC': measures[4], 'precision': measures[5], 'f-measure': measures[6]}\n allStats[method] = folderStats\n\n method = ''\n\n return allStats, overviewStats", "def parseFile(self, file):\n return_dict = {}\n with open(file) as f:\n for line in f:\n line = line.strip()\n\n if line:\n if line.startswith('Left'):\n return_dict['Left'] = self.getStats(f)\n elif line.startswith('Right'):\n return_dict['Right'] = self.getStats(f)\n elif line.startswith('Aligned'):\n return_dict['Aligned'] = self.getStats(f, line)\n elif line.startswith('Reads'):\n return_dict['Reads'] = self.getStats(f)\n else:\n matched_summary = re.search('([\\d|.%]+)', line)\n return_dict['Overall'] = matched_summary.group(1)\n\n #return_dict['Summary'] = re.search('(\\d+\\.\\d+%)', line).group(1)\n\n return return_dict", "def readstatsFile(filename):\r\n contigs = dict()\r\n with open(filename) as f:\r\n for idx, line in enumerate(f):\r\n if idx == 0:\r\n continue\r\n else:\r\n line = line.strip().split()\r\n entry = contigEntry(line)\r\n contigs[entry.contigName] = entry\r\n return contigs", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def file_stat(self, file_path):", "def process_file(file_path):\n # Check if file exists\n if not os.path.exists(file_path):\n print \"The input file %s does not exist.\" % file_path\n sys.exit(1)\n\n count = 0\n summation = 0.0\n with open(file_path) as file_object:\n # Read file chunk by chunk in ~4K size,\n # process each chunk, and aggregate the result of them\n for chunk in read_file_in_chunk(file_object):\n chunk_count, chunk_sum = process_chunk(chunk)\n count += chunk_count\n summation += chunk_sum\n print \"Count: %d\" % count\n print \"Sum: %f\" % summation", "def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)", "def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')", "def read_file(file):\n f = open(file, 'r')\n print(f.read())", "def read_stats(filename):\n header = {}\n tableinfo = {}\n measures = []\n rowmeasures = []\n\n with open(filename, 'rt') as fp:\n 
lines = fp.readlines()\n for line in lines:\n if line == line[0]:\n continue\n #parse commented header\n if line.startswith('#'):\n fields = line.split()[1:]\n if len(fields) < 2:\n continue\n tag = fields[0]\n if tag == 'TableCol':\n col_idx = int(fields[1])\n if col_idx not in tableinfo:\n tableinfo[col_idx] = {}\n tableinfo[col_idx][fields[2]] = ' '.join(fields[3:])\n if tableinfo[col_idx][fields[2]] == \"StructName\":\n struct_idx = col_idx\n elif tag == \"Measure\":\n fields = ' '.join(fields).replace('CortexVol ', 'CortexVol, ').split()\n fields = ' '.join(fields[1:]).split(', ')\n measures.append({'structure': fields[0],\n 'name': fields[1],\n 'description': fields[2],\n 'value': fields[3],\n 'units': fields[4],\n 'source': 'Header'})\n elif tag == \"ColHeaders\":\n if len(fields) != len(tableinfo):\n for idx, fieldname in enumerate(fields[1:]):\n if idx + 1 in tableinfo:\n continue\n tableinfo[idx + 1] = {'ColHeader': fieldname,\n 'Units': 'unknown',\n 'FieldName': fieldname}\n else:\n continue\n else:\n header[tag] = ' '.join(fields[1:])\n else:\n #read values\n row = line.split()\n values = {}\n measures.append({'structure': row[struct_idx-1],\n 'items': [],\n 'source': 'Table'}),\n for idx, value in enumerate(row):\n if idx + 1 == struct_idx:\n continue\n measures[-1]['items'].append({\n 'name': tableinfo[idx + 1]['ColHeader'],\n 'description': tableinfo[idx + 1]['FieldName'],\n 'value': value,\n 'units': tableinfo[idx + 1]['Units'],\n })\n return header, tableinfo, measures", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'Gauge':\n self.read_metgauge(line, f)\n elif self.cleantag(line) == 'ObservationData':\n self.read_obsgauge(line, f)\n # Next line\n line = f.nexttag()", "def read_file(path_to_file):\n 8", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def read_from_file(self, filename: str) -> None:", "def read_file(filename):\n f = open(filename)\n contents = f.read()\n f.close()\n return contents", "def __read_file(self, filename):\n with open(filename) as f:\n content = f.readlines()\n \n return content", "def parseFile(self, filename):\n\n f = open(filename, \"r\")\n s = f.read()\n f.close()\n\n logging.log(10, 'parsing filename %s: %d lines' % (filename, len(s)))\n\n self.parseString(s)", "def read_file(self, file):\n fd = open(file)\n data = fd.read()\n fd.close()\n return data", "def read_file(filename):\n with open(filename) as fp:\n return fp.read()", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def _read_stats(self, name):\n if os.name == 'nt':\n name = asunicode(name)\n stats = os.stat(name)\n mode = oct(stats.st_mode)[-4:]\n size = stats.st_size\n atime = int(stats.st_atime)\n mtime = int(stats.st_mtime)\n return (mode, size, mtime, atime)", "def read_from_file(filename):\n with open(filename, \"r\") as f:\n f.readlines()", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def read_file(file_path):\n 
file_contents = None\n with open(file_path) as f_desc:\n file_contents = f_desc.read()\n if not file_contents:\n raise CLIError('Could not read {}'.format(file_path))\n return file_contents", "def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))", "def read_file(file_path):\n with open(file_path) as file_h:\n return file_h.readlines()", "def ReadFileContents(file_name): \n all_file_contents = open(file_name, 'r').readlines()\n return all_file_contents", "def read_file(filename):\n return open(filename).read()", "def analyze_text(filename):\n lines = 0\n characters = 0\n with open(filename, \"r\") as f:\n for line in f:\n lines += 1\n characters += len(line)\n return lines, characters", "def read_file(filename):\n\n infile = open(filename, 'r')\n lines = infile.readlines()\n infile.close()\n\n return lines", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def analyze_file(filename, hash, nap):\n # http://www.pythoncentral.io/hashing-files-with-python/\n hasher = hashlib.new(hash)\n shannon = EntropyCounter()\n BLOCKSIZE = 1024 * hasher.block_size\n with open(filename, 'rb') as afile:\n # first buffer read does not get a nap\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n with nap:\n hasher.update(buf)\n shannon.update(buf)\n buf = afile.read(BLOCKSIZE)\n return {\n 'size': os.path.getsize(filename),\n hasher.name: hasher.hexdigest(),\n 'path': filename,\n 'efficiency': shannon.efficiency(),\n }", "def analyze_file(self, filename):\n if self.exceeded_max():\n return\n\n if self.preprocess is not None:\n input = self.preprocess(filename)\n else:\n with open(filename, \"r\") as file:\n input = file.read()\n\n self.analyze_raw(input)", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def process_file(self, filename: str) -> None:\n try:\n with open(filename, mode='r', encoding='utf-8') as f:\n contents = f.read()\n except Exception as e: # pylint: disable=broad-exception-caught\n # log and ignore exceptions from read\n logging.exception('Error reading %s: %s', filename, e)\n else:\n self.process(filename, contents)", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def readfile(file):\n with open(file, 'r') as f:\n data = f.read().splitlines()\n return data", "def read_file(fname):\n recs=[]\n logger.info('Start read file %s', fname)\n with open(fname) as inf:\n for line in inf:\n recs.append(line.strip().split())\n logger.info('End reading with recnumber %d', len(recs))\n return recs", "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. 
Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def read_single_file(file):\n if(file.exists()):\n read_file = open(file)\n return read_file.readlines()\n else:\n print(\"Sorry, the file cannot be found!\")", "def get_file_contents(file_name):\n\n\tf = open(file_name)\n\tlines = f.readlines()\n\tf.close()\n\treturn lines", "def _analyzeFile(self, filename):\n date = os.path.basename(filename)[:10]\n if filename.endswith('gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n lines = f.read().splitlines()\n for line in lines:\n if re.search('joined the game', line):\n self._analyzeLine(line, date, self._start_times)\n elif re.search('left the game', line) or re.search('lost connection',\n line):\n self._analyzeLine(line, date, self._end_times)\n elif re.search('Stopping server', line):\n self._server_stop_times.append(ConvertTime(date, line))", "def read_file(self, file: Path) -> str:\n with open(file) as f:\n return f.read()", "def readFromFile(filename):\n raise NotImplementedError", "def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()", "def read_data(file):\n with rasterio.open(file) as f:\n data = f.read(1)\n profile = f.profile\n return data, profile", "def analyze_text(filename):\n lines = 0\n chars = 0\n\n with open(filename, 'r') as f:\n for line in f:\n lines += 1\n chars += len(line)\n return (lines, chars)", "def get_stats():\n logger.info(\"Retrieving stats\")\n # create datetime iso format zero hour offset\n current_datetime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n # if filename doesn't exist\n if not path.exists(filename):\n return \"Statistics do not exist\", 404\n\n # get current stats\n with open(filename, 'r') as f:\n currentstats = json.loads(f.read())\n\n # return json\n stats_obj = {}\n stats_obj[\"num_users\"] = currentstats[\"num_users\"]\n stats_obj[\"num_facts\"] = currentstats[\"num_facts\"]\n stats_obj[\"most_popular_tag\"] = currentstats[\"most_popular_tag\"]\n # stats_obj[\"avg_jokes_added_weekly\"] = currentstats[\"avg_jokes_added_weekly\"]\n stats_obj[\"num_subscribed_users\"] = currentstats[\"num_subscribed_users\"]\n stats_obj[\"datetime\"] = current_datetime\n\n logger.debug(stats_obj)\n logger.info(\"Returning stats\")\n return stats_obj, 200", "def _read_file(self, filePath):\n with open(filePath) as f:\n fileContent = f.read()\n f.close()\n return fileContent.strip()", "def read_profiles(filename):\n profiles = []\n with gzip.open(filename, mode='rt', encoding='utf8') as infile:\n for line in infile:\n profiles.append(Counter(line.split()))\n return profiles", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def load_and_get_stats(filename):\n\n import scipy.io.wavfile as siow\n sampling_rate, amplitude_vector = siow.read(filename)\n\n wav_length = amplitude_vector.shape[0] / sampling_rate\n\n return sampling_rate, amplitude_vector, wav_length", "def read_file_in_parallel(file_name):\n\n logger.debug(\"start process large file in parallel\")\n\n # create pool of workers\n pool = multiprocessing.Pool(multiprocessing.cpu_count() - 1)\n\n # calculate the chunk size - for reading the file\n lines_buff_size = calculate_lines_buff_size(file_name)\n\n frequencies_counter = Counter()\n\n with 
open(file_name) as file:\n lines_iterator = read_lines_in_chunks(file, lines_buff_size)\n\n # map the work function to the workers, and provide ierator which iterate the lines in chunks\n results = pool.imap_unordered(parse_lines, lines_iterator, 1)\n\n # as soon as we start to receive results (partial dictionaries) from workers - merge them to the result dict\n for counter in results:\n frequencies_counter += counter\n\n logger.debug(\"finished process large file in parallel\")\n return frequencies_counter", "def analyze_file(self, file_path: str):\n # Set the file currently working on\n self.fileScanLabel.setText(file_path)\n\n # Reset the progress bar\n self.fileAnalyzeProgressBar.setValue(0)\n self.file_bytes_read = 0\n\n # Create a thread worker to do analyze the file\n logging.debug(\"----------------------------------------------\")\n logging.debug(\"Start Analyze Thread\")\n analyze_thread = threading.Thread(target=self.analyze_file_thread, args=(file_path, ))\n analyze_thread.start()\n logging.debug(\"Analyze Thread Complete\")\n logging.debug(\"----------------------------------------------\")", "def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content", "def handle_file(filename,operation = 'r'):\n with open(filename,operation) as f:\n data = f.readlines()\n return data", "def samtools_stats(filename):\n stats, err = Popen([\"samtools\",\"stats\",filename], stdout=PIPE, stderr=PIPE).communicate()\n if err != \"\":\n raise Exception(err)\n stats = [x.split(\"\\t\") for x in stats.split(\"\\n\")]\n chksum = [x for x in stats if x[0].startswith(\"CHK\")][0]\n stats = dict([(x[1].replace(\":\",\"\"),set_type(x[2]),) for x in stats if x[0].startswith(\"SN\")])\n stats[\"filename\"] = filename\n stats[\"chksum_read_names\"] = chksum[1]\n stats[\"chksum_sequences\"] = chksum[2]\n stats[\"chksum_qualities\"] = chksum[3]\n return stats", "def read_file(filename):\n open_kwargs = {}\n if sys.version_info.major == 3:\n open_kwargs = {'encoding': 'utf-8'}\n\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n with open(filepath, **open_kwargs) as filecontents:\n return filecontents.read()", "def read_file(file_path):\n try:\n with open(file_path, \"r\") as file_obj:\n data = file_obj.read()\n code_type = classify_response(data)\n return data, code_type\n\n except FileNotFoundError:\n writer(f\"\\nerror: Unable to read file {file_path}\\n\", FORMAT[\"ERROR\"])\n sys.exit(1)", "def get_file_contents(filename):\n with open(filename, 'r') as f:\n content = f.read()\n return content", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'SubBasins':\n self.read_subbasins(f)\n elif self.cleantag(line) == 'HRUs':\n self.read_HRUs(f)\n # Next line\n line = f.nexttag()", "def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)", "def parse_file(self, fpath):\n sdir = os.path.abspath(os.path.join(os.path.dirname(salt.__file__), os.pardir))\n with open(os.path.join(sdir, fpath), \"rb\") as f:\n return f.readlines()", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def read_file(fname):\n with open(fname, 'r') as fopen:\n fdata = fopen.read()\n return fdata", "def score_file(self, file, sample_rate=16000, vad=True):\n signal, _ = get_sample(file, 
sample_rate)\n return self.score_signal(signal, vad)", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') as f:\n return f.read()", "def read_file(filename=\"\"):\n with open(filename, 'r') as f:\n f_contents = f.read()\n print(f_contents, end='')", "def read(filename):\n\n path = os.path.join(os.path.dirname(__file__), filename)\n\n with open(path) as f:\n return f.read()", "def open_and_read_file(file_path):\n\n contents = open(file_path).read()\n words = contents.split()\n return words", "def read(self, filename):\n raise NotImplementedError", "def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def get_data_from_file(filepath):\n with open(filepath) as f:\n return f.read()", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def read_file_time(path):\n start_time = time.time()\n try:\n file = open(path, mode='rb')\n data = file.read()\n return data\n except FileNotFoundError as err:\n logger.error(err)\n raise\n else:\n file.close()\n finally:\n end_time = time.time()\n da = end_time - start_time\n logger.info(\"Read %s take %f \" % (path, da))", "def read_file(file):\n f = open(file, \"r\", encoding=\"utf8\")\n return f.read()", "def readFile(self, fname):\r\n self.scores = []\r\n self.fname = fname\r\n try:\r\n with open(fname, 'r') as f:\r\n for line in f:\r\n self.appendScore(line.split(' '))\r\n except:\r\n pass", "def open_and_read_file(file_path):\n contents = open(file_path).read()\n # your code goes here\n\n return contents", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def open_and_read_file(file_path):\n\n text_file = open(file_path)\n full_text = text_file.read()\n\n return full_text", "def ReadFile(fname, binary=True):\n with open(Filename(fname), binary and 'rb' or 'r') as fd:\n data = fd.read()\n #self._out.Info(\"Read file '%s' size %d (%#0x)\" %\n #(fname, len(data), len(data)))\n return data", "def contents(file):\n with open(file) as f:\n return f.read()", "def read_content_load(self, filename):\n str_file_woc = self.import_file(filename)\n self.parse_load(str_file_woc)\n self.disp_load_info()", "def SimpleRead(fn):\n content = \"\"\n try:\n content = open(fn).read()\n except :\n print(\"Failed to read file: %s\\n\"%(fn))\n print sys.exc_info()[1]\n\n return content", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def read_file(filename):\n with open(filename, encoding='utf-8') as src:\n return [line.strip() for line in src.readlines()]", "def read(filename):\n with open(filename, 'r') as fRead:\n samples = list(map(lambda line: line.strip(), fRead))\n return samples", "def get_basic_stats(txt_path: str) -> dict[str, int]:\n\n with open('Data/books/' + txt_path, 'r', encoding=' utf-8') as f:\n s = f.read()\n\n sen = sent_tokenize(s)\n tok = word_tokenize(s)\n u_tok = set(tok)\n\n # Adapt the trigger word for chapter given the file\n if txt_path == 'HuckFinn.txt':\n ch = [word for word in tok if word == 'CHAPTER']\n\n elif txt_path == 'AnnaKarenina.txt':\n ch = [word for 
word in tok if word == 'Chapter ']\n\n else:\n ch = [word for word in tok if word == 'ACT']\n\n freq_tok = {}\n\n # Check if the file already exists. If not, runs the token count. If it does, does nothing\n if not os.path.isfile('Data/books/top_30_' + txt_path):\n\n for token in tok:\n if token in freq_tok.keys():\n continue\n\n freq_tok[token] = tok.count(token)\n\n top_30_tokens = sorted(freq_tok.items(), key=operator.itemgetter(1), reverse=True)[:30]\n\n with open('Data/books/top_30_' + txt_path, 'w', encoding='utf-8') as f:\n\n for token, freq in top_30_tokens:\n f.write(str(token) + ' ' + str(freq) + '\\n')\n\n return {'num_sents': len(sen), 'num_tokens': len(tok), 'vocab_size': len(u_tok), 'num_chapters_or_acts': len(ch)}", "def read_file(filename=\"\"):\n\n with open(filename, 'r') as f:\n read_data = f.read()\n\n print('{:s}'.format(read_data), end='')\n\n f.closed", "def load_file(filename):\n with open(filename, \"r\") as f:\n return f.readlines()", "def load_file(file_name):\n with open(file_name,\"r\") as f:\n return f.readlines()", "def analyze_text(filename):\n with open(filename, mode='r') as f:\n lines = 0\n words = 0 \n chars = 0\n for line in f:\n lines += 1\n words += len(line.split())\n chars += len(line)\n return (lines, words, chars)", "def get_file_stat(host, fqpath):\n statformat = '%F:%n:%i:%a:%s:%h:%u:%g:%U:%G'\n command = \"stat -c '%s' %s\" % (statformat, fqpath)\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n stat_data = {}\n stat_string = rout.strip()\n (filetype, filename, inode,\n access, size, links,\n uid, gid, username, groupname) = stat_string.split(\":\")\n\n stat_data['filetype'] = filetype\n stat_data['filename'] = filename\n stat_data[\"inode\"] = inode\n stat_data[\"access\"] = access\n stat_data[\"size\"] = size\n stat_data[\"links\"] = links\n stat_data[\"username\"] = username\n stat_data[\"groupname\"] = groupname\n stat_data[\"uid\"] = uid\n stat_data[\"gid\"] = gid\n\n return stat_data\n\n g.log.error(\"Could not stat file %s: %s\" % (fqpath, rerr))\n return None", "def ReadFile(self, filename):\n file = open(filename, 'rb')\n result = \"\"\n try:\n result = file.read()\n finally:\n file.close()\n return result", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def read_file(filename):\n f = open(filename)\n code = f.read()\n f.close()\n return code" ]
[ "0.6311686", "0.6240214", "0.6233331", "0.6196064", "0.6166038", "0.6145933", "0.6106974", "0.6092859", "0.6058845", "0.6050925", "0.60243064", "0.60074306", "0.60046774", "0.5990698", "0.59887534", "0.5984321", "0.59824026", "0.5976109", "0.5953878", "0.5952983", "0.59175605", "0.59122723", "0.590014", "0.58779424", "0.5850641", "0.5843042", "0.5829827", "0.58126324", "0.577969", "0.5757167", "0.57244873", "0.5720339", "0.5718986", "0.5716767", "0.57134885", "0.57102853", "0.5681305", "0.5680481", "0.5680481", "0.56801355", "0.5679594", "0.5639168", "0.5634667", "0.56240016", "0.5622935", "0.5619371", "0.56189257", "0.56149924", "0.5612582", "0.56031907", "0.55981094", "0.5590264", "0.558703", "0.5579367", "0.5575035", "0.5573601", "0.5531116", "0.5523375", "0.5521959", "0.5514173", "0.55137813", "0.5512954", "0.55114645", "0.5509927", "0.5499617", "0.54966754", "0.5484462", "0.5474684", "0.5472554", "0.54724455", "0.54642886", "0.5452941", "0.5451928", "0.5440516", "0.54391414", "0.54384923", "0.5435594", "0.5432719", "0.54304624", "0.54273075", "0.54179615", "0.5416785", "0.5408059", "0.54018074", "0.5394385", "0.5387386", "0.538229", "0.53819704", "0.5374295", "0.5373504", "0.53661215", "0.5364254", "0.5364086", "0.53626907", "0.5356264", "0.53523254", "0.53485656", "0.534744", "0.5346441", "0.53449756", "0.53433007" ]
0.0
-1
This takes a string as an input parameter and treats it as a zip code, looks up the weather for that zipcode, and returns the current temperature at that zipcode in Fahrenheit.
def weather(zipcode):
    URL = 'http://api.openweathermap.org/data/2.5/weather?zip=' + zipcode + ',us&appid=' + '7d7a3cf9902ef14f54f49f160fc8a550' + '&units=imperial'
    webpage = urllib.request.urlopen(URL)
    contents = webpage.read()
    contents = contents.decode('ascii')
    weather = eval(contents) #this line turns it from a string into dictionaries and lists
    temperature = weather['main']['temp']
    return temperature
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, zipcode):\n response = hereService.getWeatherByZipcode(zipcode)\n return response", "def get_current_temperature(zipcode, country):\n owm = pyowm.OWM(os.environ.get('OWM_KEY'))\n observation = owm.weather_at_zip_code(zipcode, country)\n weather = observation.get_weather()\n return {\n 'temperature': weather.get_temperature('fahrenheit')['temp'],\n 'wind_speed': weather.get_wind('miles_hour')['speed'],\n 'wind_direction': weather.get_wind('miles_hour')['deg'],\n 'humidity': weather.get_humidity(),\n 'status': weather.get_status(),\n }", "def get_external_temp():\n baseurl = \"http://api.openweathermap.org/data/2.5/weather\"\n query = \"?q=salhouse&mode=xml\"\n url = baseurl + query\n r = requests.get(url)\n root = ET.fromstring(r.text)\n kelvin = float(root[1].attrib.get('value'))\n celcius = kelvin - 272.15\n return celcius", "def get_city(zip_code):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.ZIP_KEY\r\n\r\n # API endpoint\r\n url = f'https://www.zipcodeapi.com/rest/{api_key}/info.json/{zip_code}/degrees'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n data = response.json()\r\n\r\n if 'error_code' in data or 'error_msg' in data:\r\n return {\r\n 'success': False,\r\n 'query': zip_code\r\n }\r\n\r\n else:\r\n return {\r\n 'success': True,\r\n 'query': data['zip_code'],\r\n 'city': data['city'],\r\n 'state': data['state'],\r\n 'lat': data['lat'],\r\n 'lon': data['lng']\r\n }", "def temperature_f(self, tuple_data, status):\r\n fahr_search = Temperature.fahr.search(status)\r\n temperature = None\r\n try:\r\n if fahr_search != None:\r\n temperature = fahr_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n else:\r\n celcius_search = Temperature.celcius.search(status)\r\n if celcius_search != None:\r\n temperature = celcius_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n temperature = ((9.0/5) * temperature) + 32\r\n except ValueError:\r\n print \"Encoding error on '%s'\" % (status)\r\n return temperature", "def get_temperature(data):\n celcius = 0\n celcius = [i for i in data if re.search(r'\\d+[/]', i)]\n \n if celcius == []:\n return \"N/A\"\n celcius = celcius[0].split('/')[0]\n celcius = celcius.replace('M', '-')\n \n try:\n celcius = int(celcius)\n except ValueError:\n return \"N/A\"\n\n farenheit = round((celcius * 9/5) + 32) # formula to get farenheit from celcius\n temperature = \"{0} C ({1} F)\".format(celcius, farenheit)\n return temperature", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])", "def current_weather(city_name, API):\r\n global new_city\r\n try:\r\n if city_name.isnumeric(): # if input is zip\r\n url = f'http://api.openweathermap.org/data/2.5/weather?zip={city_name},&appid={API}'\r\n elif ',' in city_name: # if input has a city,state or city,country\r\n new_city = city_name.split(',')\r\n new_city_name = new_city[0].replace(' ', '%20') # so the url 
correctly handles spaces in cities\r\n if len(new_city[1]) > 2: # if the state/country code is invalid\r\n return \"Not valid state code/country code\"\r\n url = f'https://api.openweathermap.org/data/2.5/weather?q={new_city_name},{new_city[1]},us&appid={API}'\r\n elif ',' not in city_name: # if searched by only city and not state or country code, works for big cities\r\n url = f'http://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={API}'\r\n response = requests.get(url).json() # getting the proper json data based on the input of the city_name\r\n city_latitude = str(response['coord']['lat'])\r\n city_longitude = str(response['coord']['lon'])\r\n if (new_city[1].upper() in states) and (\r\n response['sys']['country'] != 'US'): # to catch foreign cities with US state codes\r\n return \"Not valid city\"\r\n elif (new_city[1].upper() not in states) and (\r\n new_city[1].upper() != response['sys']['country'] and new_city != 'XXX'):\r\n # to catch US cities with foreign country codes\r\n return 'Not a valid city'\r\n elif states[new_city[1].upper()] != coordinates(city_latitude,\r\n city_longitude):\r\n # Check to see if city is located in provided state\r\n return 'City is not located in that state'\r\n current_temp = response['main']['temp']\r\n max_temp = response['main']['temp_max']\r\n min_temp = response['main']['temp_min']\r\n feels_like_temp = response['main']['feels_like']\r\n curr_temp_fheit = round((current_temp * 1.8) - 459.67) # converting to imperial\r\n max_temp_fheit = round((max_temp * 1.8) - 459.67)\r\n min_temp_fheit = round((min_temp * 1.8) - 459.67)\r\n feels_like_temp_fheit = round((feels_like_temp * 1.8) - 459.67)\r\n description = response['weather'][0]['description']\r\n wind = round(response['wind']['speed'] * 2.23694)\r\n\r\n format_weather = (\"Current weather for \" + str(city_name) + \", \" + response['sys']['country'] +\r\n \"\\nCurrent temp: \" + str(curr_temp_fheit) + '\\nMax Temp: ' + str(\r\n max_temp_fheit) + '\\nMin Temp: ' + str(\r\n min_temp_fheit) + '\\nFeels like: ' + str(\r\n feels_like_temp_fheit) + '\\nOutlook: ' + description + '\\nWind: ' + str(\r\n wind) + ' mph')\r\n # print weather in cleaner format\r\n return format_weather\r\n\r\n except KeyError: # If a city that doesn't exist is entered\r\n return 'Not valid city'", "def f_weather(phenny, input):\n icao_code = input.group(2)\n\n if not icao_code:\n return phenny.say(\"Try .weather London, for example?\")\n\n icao_code = code(phenny, icao_code)\n\n if not icao_code:\n phenny.say(\"No ICAO code found, sorry\")\n return\n\n uri = 'http://tgftp.nws.noaa.gov/data/observations/metar/stations/%s.TXT'\n\n try:\n bytes = web.get(uri % icao_code)\n except web.HTTPError:\n phenny.say(\"No NOAA data available for that location.\")\n return\n\n if 'Not Found' in bytes:\n phenny.say(icao_code + \": no such ICAO code, or no NOAA data\")\n return\n\n phenny.say(str(metar.parse(bytes)))", "def GetWeather(query, api_key):\n try:\n owm = pyowm.OWM(api_key)\n observation = owm.weather_at_place(str(query))\n location = observation.get_location()\n weather = observation.get_weather()\n temp = weather.get_temperature('fahrenheit')\n status = CleanupWeatherStatus(weather.get_detailed_status())\n return 'It is %sF degrees with %s in %s right now.' % (int(temp['temp']),\n status,\n location.get_name())\n except:\n return 'I couldn\\'t find any weather for %s. I am sorry.' 
% (query)", "def lookup_usaf_station_by_zipcode(zipcode):\n\n usaf = zipcode_usaf.get(zipcode, None)\n return usaf", "def trans_weather(string):\r\n\treturn cn2en.WEATHER[string]", "def trans_temperature(string):\r\n\treturn int(string[:2])", "def get_weather(phenny, input):\n import wunderground\n \n report_type = 'conditions'\n\n unicode_input = unicode(input)\n if unicode_input[1:8] == 'weather':\n location_str = unicode_input[9:]\n elif unicode_input[1:3] == 'w ':\n location_str = unicode_input[3:]\n try:\n json_data = wunderground.format_json(location_str, input.weather_API, report_type)\n output_results(phenny, json_data)\n except Exception, e:\n print e\n phenny.say('Could not find results for \"%s\", please reword the search and try again.' % location_str)", "def get_temperature(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? (.*?) .*? .*? . .*? .*? . . . .*?'\n temperature = float(re.findall(pattern,summary).pop())\n return temperature", "def temperature(self):\r\n try:\r\n return str(self.connect()['main']['temp'])\r\n except:\r\n return '@weather_temperature'", "def get_temperature(elevation, sea_level):\n if elevation <= sea_level:\n return 0.8\n else:\n return (-1.0 / (1.0 - sea_level)) * (elevation - sea_level) + 1.0", "def get_zip_code(string):\n zip_code = \"\"\n\n #for each character in string\n for ch in string:\n #if the character is a number, add it to the \"zip_code\" string\n if ch.isdigit():\n zip_code += ch\n\n return zip_code", "def convert_zip_code(zipcode):\n zipcode = tf.strings.regex_replace(zipcode, r\"X{0,5}\", \"0\")\n zipcode = tf.strings.to_number(zipcode, out_type=tf.float32)\n return zipcode", "def GetWeatherByLocation():\n Location = GetLocation()\n WeatherUrl =\"http://api.openweathermap.org/data/2.5/weather?\"+ Location +\"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\"\n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n pprint(WeatherInfo)\n WindSpeed = WeatherInfo['wind']['speed']\n pprint(WindSpeed)\n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n print(type(Humidity))\n return(Temp, Humidity, Description)", "def weather():\n latlong = request.form.get(\"latlong\")\n latlong = latlong.split(\",\")\n data = lookup_weather(latlong[0],latlong[1])\n return render_template(\"weather.html\", data = data)", "def get_temp(html) -> None:\n\tif page_type_dict['general']:\n\t\tt_text = html.find('div', {'class': '_1HBR'}).text\n\t\tt_digit = ''.join([i for i in t_text if i.isdigit()])\n\t\tweather_dict['temperature'] = t_digit\n\telse:\n\t\tre_temp_class = re.compile('.*_2ezK.*') # regex template: str w/ '_2ezK'\n\t\ttemp_class = html.find('div', {'class': re_temp_class}) \n\t\t# we've got smth like: 'Ночью14°Утром19°Днём24°Вечером22°\n\t\tweather_lst = temp_class.text.split('°') # ['Ночью14','Утром19',...]\n\t\tint_weather_lst = [int(number.group()) for number in ( # for all the elems \n\t\t\tre.search(r'\\d+', word) for word in weather_lst) if number] # keep integers\n\t\t# result: [14, 19, 24, 22]\n\t\tweather_dict['temperature'] = int_weather_lst", "def temps(lieu):\r\n\r\n key = '5a72ceae1feda40543d5844b2e04a205'\r\n localisation = \"http://api.openweathermap.org/data/2.5/weather?q={0},fr&appid={1}\"\r\n localisation = localisation.format(lieu, key)\r\n request_html = requests.get(localisation)\r\n data = request_html.json()\r\n\r\n weather = data['weather'][0]['main']\r\n\r\n if 
weather == \"Clear\":\r\n weather = \"Beau\"\r\n\r\n elif weather == \"Clouds\":\r\n weather = \"Nuageux\"\r\n return weather", "def find_weather(city):\n\n\ttry:\n\t\thttp = urllib3.PoolManager()\n\t\tresponse = http.request('GET', \n\t\t\t'http://api.openweathermap.org/data/2.5/weather', \n\t\t\tfields ={\n\t\t\t'q':city, \n\t\t\t'units':'metric', \n\t\t\t\"appid\": \"2bc3e79bb974a007818864813f53fd35\"\n\t\t\t}) \n\t\tparsed_data = json.loads(response.data.decode('utf-8'))\n\t\t\n\t\t\n\t\treturn (\"\\t{}\\t{}\\t{}\").format((parsed_data['name']).ljust(10),(str(parsed_data[\"main\"][\"temp\"])).ljust(10), parsed_data[\"weather\"][0][\"description\"])\n\n\texcept Exception as e:\n\t\tprint (e)", "def weather_helper():\n\n weather = get_weather('Chicago')\n conditions = weather['weather'][0]['description']\n temperature = weather['main']['temp']\n location = weather['name']\n\n curr_weather = 'It is currently %s degrees with %s in %s' % (temperature, conditions, location)\n return curr_weather", "def GetWeatherByCity(City):\n WeatherUrl = \"http://api.openweathermap.org/data/2.5/weather?q=\"+ City + \"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\" \n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n if ('main' in WeatherInfo):\n pass\n else:\n print(\"Invalid City Name\")\n exit() \n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n return(Temp, Humidity, Description)", "def get_zipsearch(zipcode=u''):\n from x84.bbs import getterminal, LineEditor, echo\n term = getterminal()\n echo(u''.join((u'\\r\\n\\r\\n',\n term.bold_yellow(u' -'),\n term.reverse_yellow(u':'),\n u' ')))\n return LineEditor(width=min(30, term.width - 5), content=zipcode).read()", "def temperature() -> float:", "def get_temperature(self, monannul, start, end, iso) -> str:\n #This is converting a username into a UUID which is how Minecraft differentiates between players\n url = 'http://climatedataapi.worldbank.org/climateweb/rest/v1/country/' + monannul + '/tas/' + start + end + iso + '.json'\n try:\n\n response = urllib.request.urlopen(url)\n json_results = response.read()\n try:\n r_obj = json.loads(json_results)\n uuid = r_obj['id']\n return uuid\n except JSONDecodeError:\n return \"none\"\n \n except urllib.error.HTTPError as e:\n print('Failed to download contents of URL')\n print('Status code: {}'.format(e.code))\n \n except urllib.error.URLError as e:\n print('Failed to download contents of URL')\n print('Status code: {}'.format(e))\n print(\"Perhaps you have no internet connection?\")", "def temperatures():\n\n return station_9281", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp", "def temperature(altitude):\n if altitude <= 36152:\n t = 59-0.00356*altitude # deg F\n else:\n t = -70 # deg F\n t = t + 459.7 # R\n return t", "def get_closest_station_by_zipcode(zipcode):\n\n station_lookup_method_by_zipcode = lookup_usaf_station_by_zipcode(zipcode)\n try:\n station, warnings, lat, lon = _get_closest_station_by_zcta_ranked(zipcode)\n\n isd_metadata = get_isd_file_metadata(str(station))\n if len(isd_metadata) == 0:\n logging.warning(\"Zipcode %s mapped to station %s, but no ISD metadata was found.\" % (zipcode, station))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedUSAFIDError as e:\n logging.warning(\"Closest station %s is not a recognized station. 
Using backup-method station %s for zipcode %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode,\n zipcode))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedZCTAError as e:\n logging.warning(\"Unrecognized ZCTA %s\" % e)\n return None\n\n if str(station) != station_lookup_method_by_zipcode:\n logging.debug(\"Previously would have selected station %s instead of %s for zip code %s\" % (\n station_lookup_method_by_zipcode,\n str(station),\n zipcode))\n\n if warnings:\n logging.warning(\"Station %s is %d meters over maximum %d meters (%d meters) (zip code %s is at lat/lon %f, %f)\" % (\n str(station),\n int(warnings[0].data['distance_meters'] - warnings[0].data['max_distance_meters']),\n int(warnings[0].data['max_distance_meters']),\n int(warnings[0].data['distance_meters']),\n zipcode,\n lat,\n lon,\n ))\n logging.warning(\"Closest station %s is too far. Using backup-method station %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode))\n return station_lookup_method_by_zipcode\n\n return str(station)", "def run_script():\n var=raw_input(\"Enter a Zipcode: \")\n address='http://www.uszip.com/zip/'+var\n conn=urllib.urlopen(address)\n t=[]\n for line in conn.fp:\n\tline=line.strip()\n\tif '<title>' in line:\n\t line.split()\n\t print line[7:-16]\n\tif 'Total population' in line:\n\t line=line.strip('z')\n\t loc=line.index('Total population')\n\t loc2=line.index('<span')\n\t print line[(loc+25):loc2]", "def extract_zt(self, s):\n assert \"ZT\" in s\n m = re.search(\"ZT([0-9]\\.?[0-9]*)\", s)\n if m == None:\n raise Exception(\"Badly formed ZT time, bro!\")\n return float(m.group(1))", "def Get_Vital_Temp(raw_data,\n temp_startpos,\n temp_endpos):\n temp_ = raw_data[temp_startpos:temp_endpos]\n temp_ = temp_[2:4] + temp_[0:2]\n print(f'| raw_temp = {temp_}')\n temperature = Convert_Hex_To_Decimal(temp_) / 10\n return temperature", "def _temperature(self, p_input:float) -> float:\n if self._unit_in == 'R':\n temp_K = p_input*5.0/9.0\n elif self._unit_in == 'F':\n temp_K = (p_input+459.67)/9.0*5.0\n elif self._unit_in == 'C':\n temp_K = p_input+273.15\n elif self._unit_in == 'K':\n temp_K = p_input\n \n if self._unit_out == 'R':\n return (temp_K*9.0/5.0)\n elif self._unit_out == 'F':\n return (temp_K*9.0/5.0-459.67) \n elif self._unit_out == 'C':\n return (temp_K-273.15)\n elif self._unit_out == 'K':\n return temp_K", "def zip_code(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\treturn element.element_value", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def inputZip() -> int:\n while True:\n try:\n return int(input(\"Enter your zipcode for concerts near you: \"))\n except ValueError:\n print(\"Input only accepts numbers.\")", "def get_temp():\n output = subprocess.run(['vcgencmd', 'measure_temp'], capture_output=True)\n temp_str = output.stdout.decode()\n try:\n current_temp = float(temp_str.split('=')[1].split('\\'')[0])\n print(\"Current core temp is: \",current_temp)\n return current_temp\n except (IndexError, ValueError):\n raise RuntimeError('Could not parse temperature output.')", "def weather():\r\n def weather_api_call():\r\n with open('config.json', 'r') as conf:\r\n conf = json.load(conf)\r\n # Gets the API key from the config.json file\r\n weather_api_key = conf[\"weather_api_key\"]\r\n weather_city_name = conf['weather_city_name']\r\n response = 
requests.get(\r\n 'http://api.openweathermap.org/data/2.5/weather?'\r\n 'q=' + weather_city_name + '&units=metric&appid=' + weather_api_key)\r\n resp_json = response.json()\r\n with open('weather.json', 'w') as outfile:\r\n # Uses the data from the API to overwrite the weather data\r\n json.dump(resp_json, outfile)\r\n outfile.close()\r\n\r\n def weather_data_extractor():\r\n with open('weather.json', 'r') as weather_json:\r\n weather_json = json.load(weather_json)\r\n temp = weather_json[\"main\"]\r\n weather_item = weather_json[\"weather\"]\r\n desc = weather_item[0]\r\n current_temperature = \"The current temperature is: \" + \\\r\n str(int(temp[\"temp\"])) + \"C\"\r\n current_feels_like = \"Feels like: \" + \\\r\n str(int(temp[\"feels_like\"])) + \"C\"\r\n forecast = desc[\"main\"]\r\n return current_feels_like, current_temperature, forecast\r\n\r\n weather_api_call()\r\n return weather_data_extractor()", "def temperature(self, alt):\n T = self.altitude_profile(alt)[1]\n return T", "def get_weather(html):\n\tcheck_page_type(html)\n\tget_temp(html)\n\tget_table(html)\n\treturn weather_dict", "def find_zip_code(x):\n i = 0\n j = 4\n for i in range(1,len(x)-6):\n string = x[i-1:i+6]\n cond = (string[1:-1].isnumeric(), not string[0].isnumeric(), not string[-1].isnumeric())\n if all(cond):\n return x[i:i+5]", "def get(self, city: str):\n # Make a call to the OpenWeatherMap API and check the units inserted at the query parameter.\n units = request.args.get('unit', '').casefold()\n weather_data, query_units = self.get_weather(city, units)\n temp = self.check_unit(query_units)\n\n # Get the date from the request if no date is provided use the current date and time.\n date_raw = request.args.get('at')\n self.timezone = datetime.now().astimezone().tzinfo\n\n if date_raw:\n # Two date formats are allow an aware and naive date. 
If no time info has been given use the current time.\n try:\n date = isoparse(date_raw.replace(' ', '+'))\n except ValueError:\n now = datetime.now()\n date = datetime.strptime(date_raw, '%Y-%m-%d').replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond,\n tzinfo=self.timezone\n )\n else:\n now = datetime.now()\n date = datetime.now().replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond, tzinfo=self.timezone\n )\n\n # Prepare the error response.\n self.error = {\n 'error': '',\n 'error_code': ''\n }\n\n if self.check_past_date(date):\n return self.error, 400\n\n if type(weather_data) == dict:\n # Based on the date check the index of the weather that corresponds with the date in the weather response.\n index = self.find_index(weather_data, date)\n weather_dict = {\n f'{weather_data[\"list\"][index][\"weather\"][0][\"main\"].lower()}':\n f'{weather_data[\"list\"][index][\"weather\"][0][\"description\"]}',\n 'humidity': f'{weather_data[\"list\"][index][\"main\"][\"humidity\"]}%',\n 'pressure': f'{weather_data[\"list\"][index][\"main\"][\"pressure\"]} hPa',\n 'temperature': f'{str(weather_data[\"list\"][index][\"main\"][\"temp\"]) + temp}',\n }\n return weather_dict, 200\n\n elif '404' in str(weather_data):\n self.error['error'] = f'cannot find the city\"{city}\"'\n self.error['error_code'] = 'city_not_found'\n return self.error, 404\n\n else:\n self.error['error'] = 'Something went wrong'\n self.error['error_code'] = 'internal_server_error'\n return self.error, 500", "def get_temperature(\n self, sensitivity: Optional[str] = None, temp_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or temp_sensor is None:\n sensitivity, temp_sensor = self.get_temperature_sensor()\n if sensitivity == \"th\":\n temp = temp_sensor * 175.72 / 65536 - 46.85\n elif sensitivity == \"t\":\n temp = temp_sensor * 1.7572 - 46.85\n else:\n raise CloudWatcherException(\n f\"Unknown temperature sensor type {sensitivity}\"\n )\n\n return temp", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def get_weather_data(lat, lon):\n\n # Get weather\n filedata = pvtoolslib.get_s3_filename_df()\n filedata_closest = nsrdbtools.find_closest_datafiles(float(lat), float(lon),\n filedata)\n\n filename = filedata_closest['filename'].iloc[0]\n\n if filename == '124250_37.93_-122.3.npz':\n weather, info = nsrdbtools.get_local_weather_data(filename)\n else:\n weather, info = pvtoolslib.get_s3_weather_data(filename)\n\n return weather, info", "def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" \")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def fetch_weather(y):\r\n 
# request parameter(s): Start with '?'\r\n # separate name and value with '='\r\n # multiple parameter name value pairs are separate with '&'\r\n query_string = \"?id={}&units=imperial&APIKEY={}\".format(y, API_KEY)\r\n request_url = WS_URL + query_string\r\n print(\"Request URL: \", request_url)\r\n response = requests.get(request_url)\r\n if response.status_code == 200:\r\n city_name = response.json()[\"city\"][\"name\"]\r\n lst = response.json()[\"list\"]\r\n tmp_list = []\r\n for i in range(len(lst) // 8):\r\n li = [x for x in range(len(lst)) if x // 8 == i]\r\n tmp_list.append(max([lst[j][\"main\"][\"temp_max\"] for j in li]))\r\n return City(city_name, tmp_list)\r\n else:\r\n print(\"How should I know?\")\r\n return None", "def GetCityByUser():\n City = str(input())\n if (City == \"\"):\n exit()\n Weather = GetWeatherByCity(City)\n print(\"The weather in \" + City + \" is:\\n\")\n PrintWeather(Weather) \n return 1", "def temperature(self):\n temp = ct.c_float()\n self.lib.GetTemperatureF(ct.pointer(temp))\n return temp.value", "def getTodaysWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tweather = {} \n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\t\t\n\t\t# Getting todays weather data and populating the dictionary\n\t\tif fio.has_daily() is True and fio.has_hourly() is True:\n\t\t daily = FIODaily.FIODaily(fio)\n\t\t hourly = FIOHourly.FIOHourly(fio)\n\t\t for day in xrange(0, 1):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"windSpeed\":\n\t\t\t\t\t\twindSpeed = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"windBearing\":\n\t\t\t\t\t\twindBearing = unicode(daily.get_day(day)[item])\n\t\t\t\t\t\twindBearing = self.helper.convertWindBearing(windBearing)\n\t\t\t\t\tif item == \"sunsetTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"sunriseTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"wind\"] = windBearing + \" \" + windSpeed + \" mph\"\n\t\t\t\tfor item in hourly.get_hour(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[\"current\"] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"temperature\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"town\"] = self.helper.getCoords(keyword)[2]\n\t\telse:\n\t\t\treturn 'No Todays data'\n\n\t\treturn weather", "def type_zip_code(self, zip_code):\n\n\t\twith allure.step(\"Type payee zip code\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\t\telement.write(zip_code)\n\t\t\treturn None", "def weatherloop(zip_code,n):\n uh_reset()\n if n.isnumeric():\n n = int(n)\n else:\n n = 20\n Zip_Code = 
zip_code.strip().replace(' ','').split('-')[0]\n if n == 0:\n while True:\n weatherset(Zip_Code)\n else:\n for i in range(int(n)):\n weatherset(Zip_Code)\n time.sleep(5)", "def getLocation(self, state, city, token):\n d = requests.get(\n 'http://api.wunderground.com/api/' + str(token) + '/forecast/q/' + str(state) + '/' + str(city) + '.json')\n json = d.json()\n return json", "def fixup_zip(x):\n try:\n return int(str(x).strip()[:5])\n except ValueError:\n return float('nan')", "def GetLocation():\n IPinfoRequest = requests.get('https://ipinfo.io/')\n IPinfo = IPinfoRequest.json()\n Location = IPinfo['loc'].split(',')\n Latitude = Location[0]\n Longitude = Location[1]\n LocationForOpenweather = \"lat=\"+Latitude+\"&lon=\"+Longitude\n return(LocationForOpenweather)", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def getZipCode(dbpath) -> (int, float, float):\n conn = sqlite3.connect(str(dbpath))\n c = conn.cursor()\n c.execute(\"select zipcode, lat, long from user where id=1\")\n conn.commit()\n zipcode = c.fetchone()\n conn.close()\n return zipcode[0], zipcode[1], zipcode[2]", "def geocode(address):\n geo_data = requests.get(\"https://geocode.xyz/{}?json=1\".format(\n urllib.parse.quote_plus(address)))\n geo_json = json.loads(geo_data.content)\n\n return geo_json['standard']['city'], geo_json['latt'], geo_json['longt']", "def temperature(self) -> Optional[float]:\n return self.data.get(\"temp\")", "def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip", "def get_weather_data():\n get_pronto_data()\n zp = zipfile.ZipFile('open_data_year_one.zip')\n file_handle = zp.open('2015_weather_data.csv')\n return pd.read_csv(file_handle)", "def city():\n\n print(\"Welcome to my weather API\")\n\n yourcity = input(\"Enter A known City name: \")\n api_key = (\"077936f695f61908cd19a5a2452a97fb\") \n #our public Api-key \n response = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q={0}&appid=077936f695f61908cd19a5a2452a97fb\".format(yourcity, api_key)) #api call\n weather = response.json()\n print(\"The current weather in {0} is {1}\".format(yourcity, weather[\"weather\"][0][\"description\"])) #the result", "def toCelsius(farenheit):\r\n return (farenheit - 32)*5 / 9", "def get_weather(station_id):\n latitude, longitude = helper.get_station_coordinate(db, station_id)\n return jsonify(scrape(latitude, longitude))", "def temp_converter(temperature, **kwargs):\n if not isinstance(temperature, float) and not isinstance(temperature, 
int):\n raise ValueError('Positional argument `temperature` must be a float / int')\n\n if not kwargs:\n raise ValueError('Missing keyword argument!')\n else:\n if 'temp_given_in' not in kwargs:\n raise ValueError('Missing keyword argument `temp_given_in`')\n\n temp_given_in = kwargs['temp_given_in']\n\n if not isinstance(temp_given_in, str):\n raise ValueError('Keyword argument `temp_given_in` must be a str')\n\n if temp_given_in == 'c':\n return temperature * 1.8 + 32\n elif temp_given_in == 'f':\n return (temperature - 32) / 1.8\n else:\n raise ValueError('Temperature type must be `c` or `f`'.format(temp_given_in))", "def convertCelsiusToFahrenhe(C):\n if isinstance(C, str) == True:\n raise ValueError(\"Celsius cannot be a string value\")\n if isinstance(C,complex) == True:\n raise ValueError(\"Celsius cannot be a complex value\")\n if isinstance(C,int) == True:\n raise ValueError(\"Celsius should be a float value, example: 90.00\")\n \n F = (9.0/5.0 * C + 32.0)\n return F", "def connect(self):\r\n zip = self.zip\r\n ccode = self.ccode\r\n apikey = self.apikey\r\n url = f\"https://api.openweathermap.org/data/2.5/weather?zip={zip},{ccode}&appid={apikey}\"\r\n\r\n weather_obj = self._download_url(url)\r\n if weather_obj is not None:\r\n return weather_obj", "def getWeatherIndex(code, return_if_none=Constants.return_value_index_of_weather_not_found):\n # Start the index with 0\n index = 0\n for i in [100, 200, 300, 400]:\n for j in [0, 33, 66]:\n if inWeatherCodeRange(code, i + j, i + j + 33):\n return index\n index += 1\n return return_if_none", "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county", "def get_temp(val):\n if val in ['', 32767]:\n return None\n return temperature(val / 100., 'C').value('F')", "async def feels_like(self, temperature, humidity, windspeed):\n if temperature is None or humidity is None or windspeed is None:\n return 0\n\n e_value = (\n humidity * 0.06105 * math.exp((17.27 * temperature) / (237.7 + temperature))\n )\n feelslike_c = temperature + 0.348 * e_value - 0.7 * windspeed - 4.25\n return await self.temperature(feelslike_c)", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 
'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "async def lat_long(zip_code: str, country: str) -> Sequence[float]:\n key: str = f\"{zip_code}, {country}\"\n url: str = f'http://www.datasciencetoolkit.org/street2coordinates/{key.replace(\" \", \"+\")}'\n\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n response.raise_for_status()\n data = await response.json()\n\n city: Dict[str, Any] = data.get(f\"{zip_code}, {country}\", dict())\n return city.get(\"latitude\", 0.00), city.get(\"longitude\", 0.00)", "def fetch_weather(city):\n wparams = { 'city': city,\n 'key': WEATHERBIT_API_KEY\n }\n resp = requests.get(WEATHERBIT_API_URL, params=wparams)\n # this works, need to likely raise for status?\n full_weather = json.loads(resp.text)\n print(\"Got full_weather: %s\" % (full_weather))\n if not full_weather['data'][0]['precip']:\n print(\"Precip was None, coercing to 0\")\n full_weather['data'][0]['precip'] = 0\n weather_dict = {\n 'temp': farenheit(full_weather['data'][0]['temp']),\n 'conditions': full_weather['data'][0]['weather']['description'].lower(),\n 'precip': full_weather['data'][0]['precip'],\n 'forecast_temp': avg_based_on_forecast(city)\n }\n print(\"Trimmed down weather_dict: %s\" % (weather_dict))\n return weather_dict", "def get_my_zip_code(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetMyZipCode.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def get_weather(city: str, units='standard') -> tuple:\n api_key = os.environ.get('API_KEY')\n url = 'https://api.openweathermap.org/data/2.5/forecast?'\n\n try:\n response = requests.get(f'{url}q={city}&APPID={api_key}&units={units}')\n response.raise_for_status()\n 
return response.json(), units\n except HTTPError as error:\n return error, units", "def get_weather(city_name, date):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n server = (SERVER_IP, SERVER_PORT)\n sock.connect(server)\n\n sock.recv(1024)\n\n msg = \"100:REQUEST:city={}&date={}&checksum={}\".format(\n city_name, date, checksum(city_name, date)\n )\n sock.sendall(msg.encode())\n server_msg = sock.recv(1024)\n server_msg = server_msg.decode()\n if \"500\" not in server_msg:\n output = server_msg[server_msg.index(\"temp=\") + 5 :].split(\"&\")\n output[1] = output[1][5:]\n (tuple(output))\n else:\n output = (\n 999,\n server_msg[server_msg.find(\"500:ERROR:\") + len(\"500:ERROR:\") : -1],\n )\n return output", "def get_temp(self, temp_number: int) -> Temperature:\n return Temperature(self.api, temp_number)", "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def get_temp():\n count = 0\n while True:\n # Temp\n output = subprocess.check_output(\n [\"/home/andy/python/bitbucket/pitemp/Adafruit_DHT\", \"2302\", \"4\"])\n count += 1\n print (\"Attempt %s: %s\") % (count, output)\n temp_match = re.search(\"Temp =\\s+([0-9.]+)\", output)\n humid_match = re.search(\"Hum =\\s+([0-9.]+)\", output)\n\n # if the beginning of output contains temp and numbers,\n # we can assume we are getting valid data\n if temp_match:\n temp = float(temp_match.group(1))\n humidity = float(humid_match.group(1))\n break\n\n return (temp, humidity)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def getDailyWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tdaily_weather = []\n\t\tweather = {}\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\t# Getting 4-day forecast, storing each day's data in a dictionary and\n\t\t# storing each dictionary in an array\n\t\tif fio.has_daily() is True:\n\t\t\tdaily = FIODaily.FIODaily(fio)\n\t\t\tfor day in xrange(0, 4):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\t\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tdaily_weather.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No Daily data'\n\t\treturn daily_weather", "def search_engine(city_name):\n\n API_Key = \"zIGuOeUd0aE4O621Gj1KGDc6JiZ3PAGb\"\n http_request = f\"http://dataservice.accuweather.com/locations/v1/cities/search?apikey={API_Key}&q={city_name}&language=pt-br\"\n\n search_request = requests.get(http_request)\n\n if search_request.status_code != 200:\n print(f\"It was not possible to retrive information about {city_name}\")\n\n else:\n search_response = search_request.json()\n print(f\"Obtaining information about the weather in {city_name}\")\n\n return search_response[0]", "def temperature(self, 
temperature: float, from_unit: str) -> float:\n if not isinstance(temperature, Number):\n raise TypeError(f\"{temperature!s} is not a numeric value.\")\n\n return TemperatureConverter.convert(\n temperature, from_unit, self.temperature_unit\n )", "def weather()->str:\n event_log(\"retrieve weather data...\",\"\")\n location =read_json(\"weather_api\")[0] #grabs infro from json fucntion\n complete_api_link = \"https://api.openweathermap.org/data/2.5/weather?q=\"+location+\"&appid=\"+read_json(\"weather_api\")[1]+\"\"\n api_link = requests.get(complete_api_link)\n api_data = api_link.json()\n weather_desc = api_data['weather'][0]['description']#exctracts the wanted data from api\n return(weather_desc)", "def getWeatherString(index):\n return Texts.weather_titles[index]", "def get(self, cityname):\n response = hereService.getWeatherByCity(cityname)\n return response", "def getTemperature(self):\n return self.json_state.get(\"temperature\")", "def convert_temp(self, temperature):\n return 1.8 * (temperature - 273) + 32", "def GeoLocZip(zip_code, cntry):\r\n nb_error = 0\r\n #Try connection with OSM server\r\n while(nb_error < 100):\r\n try :\r\n #connection succeed\r\n time.sleep(1)\r\n g = geocoder.osm(str(zip_code)+' '+str(cntry))\r\n break\r\n except:\r\n #connection failed\r\n #try again\r\n nb_error += 1\r\n print(\"error req - nb_error : \"+str(nb_error))\r\n continue\r\n #g.osm['x'] = longitude\r\n #g.osm['y'] = latitude\r\n return g.osm['x'], g.osm['y']", "def geocode_zip():\n\n # Get user location \n zipcode = request.args.get('zipcode')\n location_result = client.geocode(zipcode)\n\n # Save needed geolocation in the session\n session['lat'] = location_result[\"results\"][0][\"location\"][\"lat\"]\n session['lng']= location_result[\"results\"][0][\"location\"][\"lng\"]\n\n city = location_result[\"results\"][0][\"address_components\"][\"city\"]\n state = location_result[\"results\"][0][\"address_components\"][\"state\"]\n session['user_facing_location'] = city + \", \" + state\n\n return jsonify(location_result)", "def getLatLng(zipcode=22207) -> (float, float):\n r = requests.get(f\"https://geocode.xyz/{zipcode}?json=1\")\n data = r.json()\n lat = data.get('latt')\n lng = data.get('longt')\n return lat, lng" ]
[ "0.6631745", "0.6549503", "0.6489142", "0.6372373", "0.63502383", "0.62462974", "0.6226757", "0.6073102", "0.6010607", "0.5896707", "0.58758765", "0.58282274", "0.57842165", "0.57408905", "0.57213604", "0.57094175", "0.56968915", "0.56946063", "0.5681781", "0.5656904", "0.5612414", "0.56036067", "0.55586815", "0.55378324", "0.5504577", "0.5503476", "0.5486682", "0.5482628", "0.5469357", "0.54655796", "0.54485714", "0.5374192", "0.5370242", "0.5365204", "0.5338557", "0.5315925", "0.5308781", "0.53041524", "0.5298482", "0.52880263", "0.52750576", "0.52362025", "0.5232086", "0.5221712", "0.5219866", "0.52142996", "0.5202642", "0.520179", "0.5201734", "0.519776", "0.515812", "0.51562625", "0.51397187", "0.5137205", "0.5111641", "0.5110469", "0.51103574", "0.51025254", "0.50952846", "0.5079604", "0.50763065", "0.50708705", "0.5057324", "0.50563365", "0.50526327", "0.5045851", "0.5040638", "0.503224", "0.502747", "0.5022611", "0.501114", "0.50085664", "0.5005732", "0.5004976", "0.49960634", "0.49910286", "0.49903372", "0.498917", "0.4986995", "0.49867377", "0.49831048", "0.4979502", "0.49717242", "0.49655247", "0.49644336", "0.49600992", "0.49571797", "0.4955775", "0.49529648", "0.49456066", "0.49414048", "0.49406198", "0.49353856", "0.49220976", "0.49153143", "0.49118954", "0.4909293", "0.49062088", "0.49056014", "0.49017817" ]
0.787071
0
Simple check to see if this cog (plugin) is enabled.
async def cog_check(self, ctx): guild_doc = await db.PLUGINS.find_one({"_id": ctx.guild.id}) if guild_doc.get("Verification"): return True else: await ctx.send( embed=discord.Embed( description=( f"{var.E_DISABLE} The Verification plugin" " is disabled in this server" ), color=var.C_ORANGE ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_enabled(self):\n return self.sdk.is_enabled", "def is_enabled(self):", "def __enabled__(component):\n registry = context.app.component_registry\n return (component.__module__ in registry.modules)", "def is_on(self):\n return self._program.get(\"enabled\") is True", "def enabled(cls):\n return (cls is not Extension)", "def is_enabled(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-enabled').succeeded", "def is_active(self):\n return bool(\n self._grpc_port is not None and\n self._event_multiplexer and\n self._event_multiplexer.PluginRunToTagToContent(\n constants.DEBUGGER_PLUGIN_NAME))", "def is_enabled(self):\n return self.enabled", "def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")", "def is_on(self):\n return self.car.data[DATA_PLUGGED_IN]", "def Enabled(self) -> bool:", "def isActive(self, handler):\n if self.plugin_manager:\n enable_manager = self.plugin_manager.EnableManager()\n enable_manager.initFrom(self.c,self.handler_path) \n return handler.__module__ in enable_manager.actives\n else:\n return True", "def is_enabled(self) -> bool:\n return self._enabled", "def plugin_enabled_by_default(self):\n return self.__plugin_enabled_by_default", "def is_enabled(self):\n #check if alsa conf is sym linked to seeed files\n asoundconf = os.path.islink(u'/etc/asound.conf') and os.path.realpath(u'/etc/asound.conf')==u'/etc/voicecard/asound_2mic.conf'\n asoundstate = os.path.islink(u'/var/lib/alsa/asound.state') and os.path.realpath(u'/var/lib/alsa/asound.state')==u'/etc/voicecard/wm8960_asound.state'\n self.logger.debug(u'is enabled? 
asoundconf=%s asoundstate=%s' % (asoundconf, asoundstate))\n\n #check loaded system modules\n modules = True\n for module in self.MODULE_NAMES:\n if not self.lsmod.is_module_loaded(module):\n self.logger.debug(u'System module \"%s\" is not loaded' % module)\n modules = False\n\n return asoundconf and asoundstate and modules", "def check_enable_mode(self, *args, **kwargs):\n pass", "def is_on(self):\n return bool(self.enabled)", "def is_enabled(self):\n return self._is_enabled", "def is_enabled(command):\n if command not in Controller.commands:\n return False\n return Controller.commands[command][2]", "def is_enabled(self):\n return self._enabled", "def is_enabled(self):\n return self._enabled", "def is_enabled(self):\n self._raise_not_implemented()", "def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True", "def enabled(self) -> bool:\n return False", "async def cog_check(self, ctx: Context) -> bool: # type: ignore[override]\n\n return ctx.guild is not None", "def is_on(self):\n return self._data[\"enabled\"]", "def _auth_plugin_available(ext):\n return ext.obj.available", "def getEnabled(self):\n if getattr(self, 'installedversion', None) != __version__ :\n return False\n return self.getField('enabled').get(self)", "def is_enabled(self):\n try:\n import mdpopups\n except Exception:\n return False\n\n return (mdpopups.version() >= (1, 7, 3)) and (int(sublime.version()) >= 3118)", "def isPkgEnabled(self, *args):\n return _libsbml.SBase_isPkgEnabled(self, *args)", "def is_enabled(self, subsystem):\n return subsystem in self.subsystems", "def is_enabled(self):\n siteconfig = SiteConfiguration.objects.get_current()\n return siteconfig.get('%s_enabled' % self.backend_id, False)", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"enabled\")", "def isEnabled(self) -> bool:\n ...", "def is_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"is_enabled\")", "def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)", "def is_element_enabled(self):\n if self.web_element.is_enabled():\n return True\n else:\n return False", "def enabled(self) -> bool:\n\n return bool(self._enabled and self._api_key)", "def check_plugin(extension, namespace, names=None):\n if names is None or extension.name in names:\n plugin_enabled = extension.plugin.enabled()\n if not plugin_enabled:\n LOG.info(u'Extension with name %s for namespace %s is not enabled', extension.name, namespace)\n return plugin_enabled\n return False", "def enabled():\n installed = installedVersion()\n required = MIN_DCM2NIIX_VERSION\n return ((installed is not None) and\n (compareVersions(installed, required) >= 0))", "def is_enabled(self):\n element = self.driver.find_element(self.by, self.id)\n return element.is_enabled()", "async def enabled(self) -> bool:\n response = await 
self.adguard.request(\"parental/status\")\n return response[\"enabled\"]", "def enabled(self):\n return True", "def enabled(self):\n return True", "def isEnabled(self):\n return self.enabled", "def enabled(self) -> bool:\n return self._enabled", "def enabled(self) -> bool:\n return self._enabled", "def enabled(self) -> bool:\n return self._enabled", "def enabled(self) -> bool:\n return self._enabled", "def enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"enabled\")", "def isEnabled(self):\n return self.__enabled", "def IsEnabled(self, var):\n return not self.IsCovered(var)", "def isEnabled(self, p_int): # real signature unknown; restored from __doc__\n return False", "def IsEnabled(self):\r\n\r\n return self._enabled", "def is_enabled(field):\n return not is_disabled(field)", "def is_check_mode_enabled(self):\n return self.in_check_mode", "def is_enabled(self, feature):\n if feature in self._disabled:\n return False\n if feature in self._enabled:\n return True\n return self.default_enabled is True", "def enabled(self) -> bool:\n return self._workerThread is not None", "def isEnabled(self):\n return _libsbml.SBMLExtension_isEnabled(self)", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_enabled\")", "def is_plugin_disabled(self, plugin: str) -> bool:\r\n with PluginStore.mutex:\r\n return plugin in self._disabled", "def is_in_service(self) -> bool:\n return self._enabled", "def __call__(self, feature):\n return self.is_enabled(feature)", "def get_isenabled(self):\n return self.isenabled", "def is_registered(self):\n return self.faucet is not None", "def is_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsEnabled', self.handle))", "def is_custom_mode_enabled(self):\n return os.environ.get('SNYK_CUSTOM_MODE', 'false').lower() in ('1', 'yes', 'true')", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> 
Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")", "def is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_enabled\")" ]
[ "0.69357497", "0.6897424", "0.685277", "0.6834709", "0.68308216", "0.68143165", "0.6804309", "0.66417366", "0.6623949", "0.6623949", "0.6623949", "0.6623949", "0.6623949", "0.6623949", "0.66176057", "0.66100556", "0.6593205", "0.65567094", "0.65470624", "0.653174", "0.65165657", "0.6492737", "0.64895916", "0.6470299", "0.64648026", "0.64648026", "0.6443683", "0.64403117", "0.6424451", "0.64228344", "0.64218277", "0.64106953", "0.6396079", "0.6383338", "0.638225", "0.6376849", "0.6368546", "0.6366202", "0.6366202", "0.6366202", "0.6366202", "0.6366202", "0.6366202", "0.6366202", "0.6366202", "0.6355915", "0.6349545", "0.63469845", "0.6337328", "0.6321235", "0.63141376", "0.63120854", "0.63116103", "0.6300579", "0.62579", "0.62579", "0.62494314", "0.62348276", "0.62348276", "0.62348276", "0.62348276", "0.62259096", "0.62101406", "0.61949056", "0.61815405", "0.6164166", "0.61554384", "0.6145074", "0.61422545", "0.61392164", "0.61222225", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.6121341", "0.610933", "0.6097892", "0.6097579", "0.6093639", "0.6086242", "0.6076688", "0.6070535", "0.60596657", "0.60596657", "0.60596657", "0.60596657", "0.60596657", "0.60596657", "0.60596657", "0.60596657", "0.60596657", "0.60596657" ]
0.6632713
8
Finds the token where the value is stored.
def _value_token_index(self): # TODO: memoize this value for i, token in enumerate(self.tokens): if not token.type.is_metadata: return i raise RuntimeError('could not find a value token')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_value))\n return token", "def whereis_token(self, tid, silent=False):\n tk = self.get_token(tid)\n if tk:\n rs = tk.position()\n else:\n rs = None\n if not silent:\n msg = \"Token %s position is %s\" % (tid, rs)\n self.parser.status(msg)\n return rs", "def get_token(self, symbol):\r\n for token in self:\r\n if token[\"symbol\"].lower() == symbol.lower():\r\n return token\r\n return None", "def GetCurrentToken(tokens, pos):\n i = 0\n while i < len(tokens):\n if pos > tokens[i].start and pos < tokens[i].end:\n return tokens[i]\n if pos < tokens[i].start:\n return tokens[i-1] if i > 0 else None\n i += 1\n\n return tokens[len(tokens)-1] if tokens else None", "def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n # type: (Token, int, Optional[str], bool) -> Token\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t", "def find_value(code, value):\n value_pattern = re.compile(rf\"{re.escape(value)} ?= ?([^=][a-zA-Z0-9\\.'/_)(]*)\")\n\n target = None\n for line in code:\n if value_pattern.search(line):\n target = re.findall(value_pattern, line)\n break\n\n return target[0] if target is not None else value", "def token(self):\n return self[\"token\"]", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def search_token(self, message):\n\n # First search for variable name enclosed in single quotes\n m = re.search(\"'.*'\", message)\n\n # If there's no variable name search for nil-check message\n if m is None:\n m = re.search(r'nil(?=-check)', message)\n\n # If there's no nil-check search for method name that comes after a `#`\n if m is None:\n m = re.search(r'(?<=#)\\S+', message)\n\n return m.group(0) if m else None", "def find(self, value):\n for position in range(self.get_size()):\n if self.table[position] == value:\n return position", "def search(self, val):\n current = self.head\n # import pdb; pdb.set_trace()\n while current is not None:\n if current.data == val:\n return current\n current = current.next_node\n return None", "def find_node(self, value):\n cur = self.first\n while cur:\n if cur.value == value:\n return cur\n cur = cur.next\n return None", "def token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token\")", "def token(self):\r\n return self._token", "def find_offset(self,value):\n return self.header.find_offset(value)", "def current(self) -> Token:\n return self.tokens[self.pos]", "def n_value(self, token):", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token", "def _parse_token(self, body):\n\n token_match = re.search('var\\s*token\\s*=[\\s\\']*(\\d+)', body)\n return int(token_match.group(1))", "def _find(self, val, cur_node):\n if val == cur_node.data:\n 
return cur_node\n elif val > cur_node.data:\n if not cur_node.right:\n return None\n return self._find(val, cur_node.right)\n elif val < cur_node.data:\n if not cur_node.left:\n return None\n return self._find(val, cur_node.left)", "def key_word(self):\n return self.current_token", "def search(self, val):\n current = self.head\n found = False\n while current and not found:\n if current.val == val:\n found = True\n return current\n current = current.next\n return None", "def token(self) -> Token:\n return getattr(self, \"tok\", None)", "def location(self):\r\n return conf.lib.clang_getTokenLocation(self._tu, self)", "def LookupToken(self, dmtoken):\n self.ReadClientStateFile()\n return self._registered_tokens.get(dmtoken, None)", "def search_tree(token, root):\n matched_pos = []\n for node in PreOrderIter(root):\n # If a node is not defined in our searchable list, skip it\n if getattr(node, 'id') not in node_types.KEY_PROPERTY:\n continue\n else:\n for field in node_types.KEY_PROPERTY[getattr(node, 'id')]:\n if not hasattr(node, field):\n continue\n value = getattr(node, field)\n if token in str(value):\n matched_pos.append(node)\n\n if len(matched_pos) == 0:\n return None\n else:\n return matched_pos", "def token(self):\n print(\"getter of token called\")\n return self._token", "def string_val(self) -> str:\n return self.current_token", "def _value(token):\n result = re.match(r'\\d*', '0' + token)\n return int(result.group(0))", "def search(self, value):\r\n node = self.head\r\n while node:\r\n if node.value == value:\r\n return node\r\n node = node.next\r\n raise ValueError('Value not found')", "def find(self, value: str, is_sorted=False) -> CompletionElement:\n if is_sorted:\n raise NotImplementedError( # pragma: no cover\n \"No optimisation for the sorted case.\"\n )\n for e in self:\n if e.value == value:\n return e\n return None", "def get_token(self):\n if not self.is_valid():\n logger.warn(\"TokenWall form data is not valid.\")\n return None\n \n tt = self.cleaned_data['token']\n logger.debug(\"Looking for token '%s'\"%tt)\n return Token.objects.get(value=tt)", "def _findIdentifierValue (self, identifier : String) -> String:\n\n Logging.trace(\">>: %s\", identifier)\n cls = self.__class__\n\n if identifier not in self._keyToValueMap:\n # leave identifier as is (it might be some value name like\n # wahr or false\n Logging.traceError(\"no expansion found\")\n result = identifier\n else:\n result = self._keyToValueMap[identifier]\n\n if not isString(result):\n result = repr(result)\n else:\n result = (cls._doubleQuoteCharacter + result\n + cls._doubleQuoteCharacter)\n\n Logging.trace(\"<<: expanded %s into %r\", identifier, result)\n return result", "def token(self):\n\n return self.__token", "def search(self, val):\n search = self.head\n while search:\n if search.val == val:\n return search\n search = search.next\n return None", "def find(self, value):\n if self.value is None:\n raise BinaryTreeValueError(\"Value {} not in tree\")\n\n if self.value == value:\n return self.left_length\n\n elif value < self.value:\n # Value is in left side of tree\n return self.left.find(value)\n\n else:\n # Value is in right side of tree\n return self.right.find(value) + self.left_length + 1", "def _get_value(self, node):\n val = None\n if isinstance(node, ast.Str):\n val = node.s\n elif isinstance(node, ast.BinOp):\n if pairwise_isinstance(\n (node.op, ast.Mod), (node.left, ast.Str),\n (node.right, ast.Name)):\n val = node.left.s % self.globals_[node.right.id]\n elif pairwise_isinstance(\n (node.op, 
ast.Add), (node.left, ast.Name),\n (node.right, ast.Str)):\n val = self.globals_[node.left.id] + node.right.s\n elif isinstance(node, ast.Name):\n val = self.globals_[node.id]\n\n if val is None:\n raise ValueError(\n \"Unable to find value in %s, only the following are parsed: \"\n \"GLOBAL, 'pkg.foobar', '%%s.foobar' %% GLOBAL or 'GLOBAL + \"\n \"'.foobar'\"\n % ast.dump(node))\n\n return val", "def find(self, value):\n # initialize node as root\n node = self.root\n\n # find value\n while node != None:\n\n # value found: return node\n if node.value == value:\n return node\n\n # value is smaller than node: search in left sub tree\n elif node.value > value:\n node = node.left\n\n # value is bigger than node: search in right sub tree\n else:\n node = node.right\n\n # value not found: return None\n return None", "def find(self, value):\n bucketNum = self.__hash(value)\n result = self.__buckets[bucketNum].find(value)\n return result", "def find_value(self, value_to_find):\n pos = np.where(self.board == value_to_find)\n # convert pos to tuple of ints\n return tuple(ax[0] for ax in pos)", "def find(self,item):\n sig = str(item)\n try:\n return self.index[sig]\n except:\n return None", "def search(self, value):\n return self._search(self.head, value)", "def find_which(self, mu, which_to_find):\n ind, which = self.find(mu)\n if ind is not None and which_to_find in which:\n return which[which_to_find]\n else:\n return None", "def first_token(self):\n if self.tokens:\n return self.tokens[0]\n return \"None\"", "def get_token(self):\n\n return self._token", "def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"", "def get_value_of_token(self, query) -> float:\n # make it a KappaToken, if it's not one already\n if not type(query) is KappaToken:\n q = KappaToken(query)\n else:\n q = query\n # return value, if token is present\n if q.get_token_name() in self._tokens:\n value = float(self._tokens[q].get_token_operation())\n else:\n warnings.warn('Token <' + str(query) + '> not found in this snapshot.')\n value = None\n return value", "def _peek(self):\n return self.token_list[self._current]", "def find(self, val):\n\n\t\tif not self.root:\n\t\t\treturn None\n\n\t\tQ = [self.root]\n\t\twhile Q:\n\t\t\tnode = Q.pop(0)\n\n\t\t\tif node.val == val:\n\t\t\t\treturn node\n\n\t\t\tif node.left:\n\t\t\t\tQ.append(node.left)\n\n\t\t\tif node.right:\n\t\t\t\tQ.append(node.right)\n\n\t\treturn None", "def findParameter(self, pos):\n text = self.text()\n comma_pos = text.find(',', pos)\n if comma_pos == -1:\n comma_pos = len(text)\n left_comma = text.rfind(',', 0, comma_pos) + 1\n left_eq = text.rfind('=', 0, comma_pos) + 1\n left_delim = max(left_comma, left_eq)\n start = left_delim\n length = comma_pos - left_delim\n return start, length", "def token(self) -> Optional[str]:\n return self._builder._token", "def match(self, token):\n\n if self.la == token:\n self.la, self.val = self.next_token()\n else:\n raise ParseError(\"found {} instead of {}\".format(self.la, token))", "def _index_lookup(self, key: int) -> str:\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token", "def find_node(self, value):\n for (fun, node) in self.__root.__fast_find:\n if fun(value):\n return node\n return None", "def lookupVal(self, val):\n pybtlib.lookupVal.restype = ctypes.c_int\n pybtlib.lookupVal.argtypes = 
[ctypes.POINTER(Tree), ctypes.c_int]\n return pybtlib.lookupVal(ctypes.byref(self), val)", "def Token(self) -> Token:\r\n\t\treturn self._token", "def get_token_from_offset(self, offset):\n # type: (int) -> Token\n return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]", "def search(self, val):\n if not self.root:\n return None\n else:\n return self._find(val, self.root)", "def ahead(self, k):\n assert k == 1\n if self.pos + k < len(self.tokens):\n return self.tokens[self.pos + k]\n return None", "def search(self, find_val):\n return self.preorder_search(self.root, find_val)", "def search(self, find_val):\n return self.preorder_search(self.root, find_val)", "def find_special_token_index(identified_concepts: IdentifiedConcepts, special_token: str):\n for i in range(len(identified_concepts.ordered_concepts)):\n concept = identified_concepts.ordered_concepts[i]\n if concept.name == special_token:\n return i\n return -1", "def id(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def _parse_json_token(self, body):\n\n token_match = re.search('var\\s*jsonToken\\s*=[\\s\\']*([\\w-]+)', body)\n return token_match.group(1)", "def find_value(dic, key):\n return dic[key]", "def token(self, ent):\n if ent.eid in self.lookup: return self.lookup[ent.eid]\n\n if ent.tid == data.AGENT_TYPE_ID: prefix = 'A'\n elif ent.tid in data.combatants: prefix = 'C'\n elif ent.tid in data.gatherable: prefix = 'G'\n\n ct = self.counts[prefix]\n self.counts[prefix] += 1\n tok = \"{}{}\".format(prefix, ct)\n self.lookup[ent.eid] = tok\n return tok", "def _find_if_hashclash(self, key: str, location: int, key_or_value: str):\n if key in self.HashMap[location]:\n idx = self.HashMap[location].index(key)\n else:\n idx = None\n\n if idx is not None:\n if key_or_value == \"v\":\n return idx + 1\n else:\n return idx", "def get(tokens, x):\n\n if 0 <= x < len(tokens):\n return tokens[x]\n\n return None", "def find(self, value):\n index = self.intervals.bisect_left(value)\n if index < len(self.intervals) and self.intervals[index].lower == value:\n return self.intervals[index]\n if index > 0 and self.intervals[index - 1].contains(value):\n return self.intervals[index - 1]\n return None", "def find_reg(self, reg):\n for key, val in self.register_def.iteritems():\n if reg in val.keys():\n return key, val[reg]\n return None, None", "def process_id_from(self):\r\n return self._tokens[1]", "def search(self, val):\n if not self.head:\n raise IndexError('Cannot search empty list.')\n\n current_node = self.head\n\n while current_node:\n if current_node.val == val:\n return current_node\n current_node = current_node.next", "def peek_token(self):\n tok = next(self)\n self.unpop_token(tok)\n return tok", "def __findPlaceholder(self, data, index):\r\n m = self.__placeholder_re.search(data, index)\r\n if m:\r\n return m.group(1), m.end()\r\n else:\r\n return None, index + 1", "def find_token_for_authorization(authorization):\n return None", "def findSymbol(self, exp):\n k = str(exp)\n try:\n return self.currSyms[k]\n except KeyError:\n raise SymbolNotFound('Identifier not found:<%s>' % (k))", "def _findPosition(self, key):\n for i in range(len(self._entryList)):\n if self._entryList[i].key == key:\n return i\n return None", "def find(self, number: str) -> Optional[str]:\n if number in self.data: # noqa\n return number\n else:\n return None", "def token_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_id\")", "def _word_lookup(self, key: str) 
-> int:\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx", "def get_token(self, name):\n if self.kv.get(name):\n return self.kv.get(name)\n token = self.random_string(24)\n self.kv.set(name, token)\n return token", "def get_next_segment_value(self):\n segval = \"\"\n if TestScriptElementType.get_element_type(self.next_segment) == \"VARIABLE\":\n segval = TestScriptSymbolTable.get_value_from_sym_tab(self.next_segment, TestScriptSymbolTable.test_script_sym_tab)\n segval = segval if segval else \"\"\n elif TestScriptElementType.get_element_type(self.next_segment) == \"STRING\":\n segval = self.next_segment\n\n return segval", "def _find(self, value): \n # case 1: look deeper, left\n if self.value > value and self.left is not None:\n return self.left._find(value)\n\n # case 2: look deeper, right\n if self.value < value and self.right is not None:\n return self.right._find(value)\n\n # case 3: found it, or nothing to find\n else:\n return self", "def find_symbol(self, op):\n for ii in self.__symbols:\n if ii.get_name() == op:\n return ii\n return None", "def _find_first(self, ast, label):\n res = self._find_all(ast, label, max_results=1)\n if len(res):\n return res[0]\n return None", "def maybe_advance(self, expected_type):\n token = self._get_token()\n if token and token.type == expected_type:\n self.pos = token.pos\n return token.value\n return None", "def get_token(html, pattern):\n result = pattern.search(html)\n if result:\n return result.group(1)\n else:\n error('Failed to find token')\n return None", "def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False", "def find(self, value):\n if self.value == value:\n return True\n else:\n if self.value > value:\n if not isinstance(self.left, Node):\n return False\n else:\n return self.left.find(value)\n elif self.value < value:\n if not isinstance(self.right, Node):\n return False\n else:\n return self.right.find(value)", "def read_token(self):\n self._skip_white_space()\n return self._get_token()", "def find(self, value):\n bucketNum = self.__hash(value)\n originalBucketNum = bucketNum\n if self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] == value:\n return self.__buckets[bucketNum]\n else:\n bucketNum = self.__rehash(bucketNum)\n while self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] != value and \\\n bucketNum != originalBucketNum:\n bucketNum = self.__rehash(bucketNum)\n if self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] == value:\n return self.__buckets[bucketNum]\n else:\n return None", "def get_token(self, tid):\n if self.lliagraph:\n return self.lliagraph.get_token(tid)\n else:\n return None", "def getWordIdx(token, word2Idx): \n if token in word2Idx:\n return word2Idx[token]\n elif token.lower() in word2Idx:\n return word2Idx[token.lower()]\n \n return word2Idx[\"UNKNOWN_TOKEN\"]", "def get_current_token(self):\n with self._lock:\n if self._unfinished_ids:\n return self._unfinished_ids[0] - self._step\n\n return self._current", "def parse_token(page_html):\n offset = 7\n token = page_html.find(\"token\")\n start_pos = (page_html[token:]).find('value=\"') + token\n end_pos = (page_html[start_pos + offset:]).find('\"') + start_pos + offset\n\n return page_html[start_pos + offset:end_pos]", "def read_value(where, ttype=None):\n ttype = VOID_P if ttype is None else ttype\n\n frame = gdb.selected_frame()\n if where.startswith(\"$\"):\n return frame.read_register(where[1:]).cast(ttype)\n else:\n to_parse = 
\"(%s) %s\" % (str(ttype), where)\n return gdb.parse_and_eval(to_parse)" ]
[ "0.6786406", "0.6442385", "0.631681", "0.61292017", "0.60891455", "0.60803986", "0.5987252", "0.5974573", "0.5974573", "0.59730595", "0.5952903", "0.5946502", "0.59407806", "0.5928285", "0.5908328", "0.5895346", "0.5840744", "0.5825658", "0.58217704", "0.58217704", "0.58217704", "0.5820745", "0.5816943", "0.5813473", "0.57983875", "0.57854855", "0.57698554", "0.57656956", "0.57608443", "0.5752086", "0.57227474", "0.5719494", "0.5713588", "0.5703582", "0.5698303", "0.5693262", "0.569109", "0.56908786", "0.5689186", "0.56861365", "0.56765026", "0.5656638", "0.5652952", "0.56360894", "0.5608852", "0.56045985", "0.5604157", "0.5597015", "0.5572681", "0.5572012", "0.5567219", "0.5558774", "0.5547731", "0.5539673", "0.552153", "0.55054766", "0.5498092", "0.54950553", "0.549264", "0.54920375", "0.5487186", "0.5468672", "0.5463441", "0.54568624", "0.54568624", "0.54452693", "0.5442678", "0.5438945", "0.5436227", "0.54295826", "0.5427559", "0.5426058", "0.5424648", "0.5406653", "0.54013664", "0.5391535", "0.53888226", "0.538556", "0.53755146", "0.5372834", "0.5371839", "0.5368946", "0.53688544", "0.53650635", "0.5364729", "0.53471994", "0.53410625", "0.53331995", "0.5332437", "0.5332002", "0.5331428", "0.53278047", "0.5324844", "0.5323108", "0.5320916", "0.53207886", "0.53205305", "0.5319199", "0.5316214", "0.5296493" ]
0.6961675
0
Returns a Python value contained in this atomic element.
def value(self): return toml2py.deserialize(self._tokens[self._value_token_index()])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getvalue(self):\n return BytesIO.getvalue(self)", "def value(self):\n\n\t\treturn self.__value", "def value(self):\n return self._read()", "def value(self) -> Any:\n return self._value", "def value(self):\n return self.__value", "def value(self):\n return self.__value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._val", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def value(self):\n return self.raw.get_attribute(\"value\")", "def value(self):\n return self.value()._value", "def read_value(self):\n return self.load_attr(\"value\")", "def value(self):\n return self._value_", "def value(self):\n return self._", "def value (self) :\n\n return self.__value__", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def get_value(self):\n return self._val", "def get(self):\n return self._value", "def native_value(self):\n return self._handle(\"native_value\")", "def value(self):\n return self.get_data(\"value\")", "def value(self):\n return self.node_value", "def getValue(self):\n \n return self._value", "def get_value(self):\n return self._value", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def value(self) -> any:\r\n\r\n return self.__value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def value(self):\n\n return self._value", "def _get_value(self):\n return self.__value", "def value(self):\r\n return self._data['value']", "def value(self):\n return self._data", "def getValue(self):\n return self.value", "def value(self) -> Any:\n return pickle.loads(self.pickled_value)", "def value(self):\n return self.element.get_attribute('value')", "def value(self):\n return self.element.get_attribute('value')", "def value(self):\n return self.element.get_attribute('value')", "def value(self):\n\n return self.element().get_attribute('value') if self.exists() else ''", "def _get_value(self):\n \n return self._value", "def getval(self):\r\n return self.value", "def value(self):\n\n return 
self._get_cached_value(self._value)", "def getValue(self):\n return self.value", "def get_val(self):\n return self.value", "def value(self):\n return self.data", "def value(self):\n return self.get_attribute(\"value\", str(self.children))", "def value(self):\n return self.string", "def value (self):\r\n return self.entry.get()", "def _value(self):\n return self.device.value(*self._id[1:])", "def get_value(self) -> Any:\n raise NotImplementedError()", "def load(self):\n return self._value", "def get(self):\n value = self.entry.get()\n return value", "def getvalue(self):\n return str(self.data)", "def read(self):\n return self.get_attr().Value()", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value", "def get_value(self):\n raise NotImplementedError", "def value(self) -> str:\n return self[\"Value\"]", "def getValue(self):\n return _libsbml.ASTNode_getValue(self)", "def value(self):\n return self.compute_value()", "def get_value(self):\n pass", "def unmarshal(self):\n return self.value", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")" ]
[ "0.75496817", "0.7389974", "0.7359954", "0.7347043", "0.733962", "0.733962", "0.7327104", "0.7327104", "0.7303945", "0.7303945", "0.7303945", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.72900474", "0.7287122", "0.7287122", "0.7271284", "0.7266666", "0.72646344", "0.72415733", "0.72311735", "0.720859", "0.7203227", "0.7203227", "0.720014", "0.7199517", "0.71941346", "0.71934843", "0.7165272", "0.7156097", "0.7149394", "0.71234107", "0.71234107", "0.71234107", "0.7121223", "0.7105494", "0.7105494", "0.7105494", "0.7100945", "0.7099248", "0.70694625", "0.7062844", "0.705776", "0.70504576", "0.7038126", "0.7038126", "0.7038126", "0.70366347", "0.7035039", "0.7033091", "0.7028139", "0.7025463", "0.7024049", "0.70216805", "0.69863194", "0.6966929", "0.69659096", "0.69568187", "0.69464123", "0.6915651", "0.6889439", "0.68415266", "0.68093985", "0.6806972", "0.6806972", "0.6806972", "0.68019485", "0.67811334", "0.67797446", "0.6762927", "0.676261", "0.67482054", "0.673466", "0.673466" ]
0.68537635
87
Sets the contained value to the given one.
def set(self, value): assert (not is_sequence_like(value)) and (not is_dict_like(value)), 'the value must be an atomic primitive' token_index = self._value_token_index() self._tokens[token_index] = py2toml.create_primitive_token(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_value(self, field, value):\n field = self.find_first(field)\n if field is not None:\n field.value = value", "def assignValue(self,value):\n self.itemset(value)", "def assignValue(self,value):\n self.itemset(value)", "def set_value(self,x):\n self._value = x", "def set_value(self,x):\n self._value = x", "def set(self, node, value):\n self.val[node] = value", "def set_value (self):\n raise NotImplementedError", "def setValue(self, value):\n self.setValues((value, value))", "def assign(self, value):\n self.value = value", "def set(self, value):\n self._storage.set(self._item, value)", "def setval(self, newval) -> None:\n if self.val is None:\n self.val = newval\n else:\n raise RuntimeError('LocNode value set twice!')", "def value(self, value):\n self.set_data(value)", "def set_value(self, value: Hashable):\n\t\tself._value = value\n\t\tself._potential_values.clear()", "def setval(self, val):\r\n self.value = val", "def set_value ( self, object, value ):\n object[ self.index ] = value", "def set_value(self, val):\n self.value = val", "def set(self, value):\n if value is None:\n self.value = [] if self.list else None\n else:\n value = self.cast(value)\n if self.list:\n self.value.append(value)\n else:\n self.value = value", "def set(self, obj, value):\n pass", "def _set(self, value):\n self._local._value = value\n if threading.current_thread() is self._root_thread:\n self._root_value = value", "def set_value(self, value):\n self.value = value", "def set_value(self, value):\n self.value = value", "def set_value(self, value):\n self.value = value", "def setValue(self, name: unicode, value: object) -> None:\n ...", "def set(self, name, value):\n pass", "def set_value(self, val):\n self._value = val", "def set_value(self, val):\n self._value = val", "def _setValue(self, field, value):\n self._contents[field] = value", "def set_value(self, value):\n if self.value:\n raise ValueError(\"Already has a Value:\", self)\n\n self.value = value\n\n if self.value != 0:\n self.possible = None\n self.solved = True", "def value(self, value):\n\n\t\tself.__value = value", "def setValue(self, value):\n self._value = value", "def _set_value(self, value, name, option):\r\n self.set_value(name, option, value)", "def put(self, item, value, set_doc):\n if item is None:\n raise Exception(\"call __setitem__ with None argument\")\n else:\n self.size += 1\n self.root = self.set(self.root, item, int(value), 0, set_doc)", "def __set__(self, instance, value):\n instance._values[self.name] = self.process(value)", "def set(self, item, value):\r\n raise NotImplementedError", "def set(self, U):\n pass", "def set(self, U):\n pass", "def __set__(self, page, value):\n element = self.get(page)\n element.value = value", "def set_val(self, input):\n return", "def set(self, value):\n if value == self.value:\n return False\n self.value = value\n return True", "def set(self, obj, value):\n raise NotImplementedError", "def _set_item_impl(self, key: Any, value: Any) -> None:\n from omegaconf.omegaconf import _maybe_wrap\n\n from .nodes import AnyNode, ValueNode\n\n if isinstance(value, Node):\n do_deepcopy = not self._get_flag(\"no_deepcopy_set_nodes\")\n if not do_deepcopy and isinstance(value, Container):\n # if value is from the same config, perform a deepcopy no matter what.\n if self._get_root() is value._get_root():\n do_deepcopy = True\n\n if do_deepcopy:\n value = copy.deepcopy(value)\n value._set_parent(None)\n\n try:\n old = value._key()\n value._set_key(key)\n self._validate_set(key, value)\n finally:\n 
value._set_key(old)\n else:\n self._validate_set(key, value)\n\n if self._get_flag(\"readonly\"):\n raise ReadonlyConfigError(\"Cannot change read-only config container\")\n\n input_config = isinstance(value, Container)\n target_node_ref = self._get_node(key)\n special_value = value is None or value == \"???\"\n\n input_node = isinstance(value, ValueNode)\n if isinstance(self.__dict__[\"_content\"], dict):\n target_node = key in self.__dict__[\"_content\"] and isinstance(\n target_node_ref, ValueNode\n )\n\n elif isinstance(self.__dict__[\"_content\"], list):\n target_node = isinstance(target_node_ref, ValueNode)\n # We use set_value if:\n # 1. Target node is a container and the value is MISSING or None\n # 2. Target node is a container and has an explicit ref_type\n # 3. If the target is a NodeValue then it should set his value.\n # Furthermore if it's an AnyNode it should wrap when the input is\n # a container and set when the input is an compatible type(primitive type).\n\n should_set_value = target_node_ref is not None and (\n (\n isinstance(target_node_ref, Container)\n and (special_value or target_node_ref._has_ref_type())\n )\n or (target_node and not isinstance(target_node_ref, AnyNode))\n or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))\n )\n\n def wrap(key: Any, val: Any) -> Node:\n is_optional = True\n if not is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n target = self._get_node(key)\n if target is None:\n if is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n is_optional = target._is_optional()\n ref_type = target._metadata.ref_type\n return _maybe_wrap(\n ref_type=ref_type,\n key=key,\n value=val,\n is_optional=is_optional,\n parent=self,\n )\n\n def assign(value_key: Any, val: ValueNode) -> None:\n assert val._get_parent() is None\n v = val\n v._set_parent(self)\n v._set_key(value_key)\n self.__dict__[\"_content\"][value_key] = v\n\n if input_node and target_node:\n # both nodes, replace existing node with new one\n assign(key, value)\n elif not input_node and target_node:\n # input is not node, can be primitive or config\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)\n elif input_node and not target_node:\n # target must be config, replace target with input node\n assign(key, value)\n elif not input_node and not target_node:\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)", "def change_value(self,val):\n self.val = val", "def value(self, value):\n self._update_value(value)", "def set_value(self, value):\n self.value = value\n return self", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def set_value(self, new_value, borrow=False):\r\n if borrow:\r\n self.container.value = new_value\r\n else:\r\n self.container.value = copy.deepcopy(new_value)", "def pre_set(self, value):\r\n return value", "def _checked_set(self, struct, field, value):\n setattr(struct, field, value)\n self._check_field_length(struct.DESCRIPTOR.fields_by_name[field], value)", "def set_Value(self, n_value):\n#Joerg 
S/Martin W advice\n self.StoredValue=n_value", "def __set__(self, obj, value):\r\n pass", "def set_entry(self, val):\n self.value = val", "def set(self, other):\n if self.id is None:\n self._values = other\n else:\n if type(other) is float:\n self._set_val(other)\n if type(other) is _np.ndarray:\n self._set_array(other)\n if _com.isField(other):\n self._set_array(other.get())", "def set(self, key: t.Hashable, value: t.Any) -> None:", "def set_value(self, value):\n if value not in self.domain and value is not None:\n raise ValueError\n\n self.value = value", "def __set__(self, instance, value):\r\n if instance:\r\n return instance._values[self.column.column_name].setval(value)\r\n else:\r\n raise AttributeError('cannot reassign column values')", "def set(self, value):\n self.value = value\n self.synced = True", "def setValue(self, v):\n old = self.value\n self.value = v\n self.put(self.key, v)\n return old", "def _set_value(o, d):\n if isinstance(o, Param) and not o._mutable:\n return # ignore requests to set immutable params\n else:\n try:\n o.value = d\n except AttributeError:\n o = d # this would be an indexed parameter", "def set(self, val: int) -> None:\n self.val = val\n self.notes = []", "def __setitem__(self, index, value):\n self.elem[index] = value", "def __set__(self, stack: \"stack.Stack\", value: Any):\n with self._lock:\n self.assign_value_to_stack(stack, value)", "def _set(self, driver: AbstractHasFeatures, value: Any):\n with driver.lock:\n set_chain(self, driver, value)", "def set(self, value):\n\t\t# The assertion is not thread-safe, but merely a sanity check.\n\t\tassert not self.event.is_set()\n\t\tself.value = value\n\t\tself.event.set()", "def setvalue(self, index, value):\n self._checkIndex(index)\n self._items[index].value = value", "def setValue(self,val):\n if val:\n self.input.setValue(val)", "def _set( self, spec, value ):\n key, patt, conf = spec\n if 'count' in conf:\n self._values[ key ].append( value )\n else:\n self._values[ key ] = value", "def setValue(self,val):\n self.input.setValues(val)", "def set(self, value):\n if not self.independent:\n raise TypeError(\"Cannot set the value of a cell that is not independent\")\n self._set(value)\n return self", "def _val(self, value):\n cast_val = self._cast(value)\n nval = cast_val\n\n if not self._validate(nval):\n self._setter_error('is invalid', cast_val)\n nval = self._default\n\n h_ok, nval = self._run_hook(nval)\n if not h_ok:\n self._setter_error('is invalid (hook)', cast_val)\n\n self.__val = nval", "def set_values(self, value):\n for i in range(len(self)):\n self._elements[i] = value", "def value(self, value):\n self._value = value\n self.is_dirty = True", "def _call_set(vecObj, val):\n res = vecObj.set(val)\n return res", "def setValue(self,val):\n for f,v in zip(self.fields,val):\n f.setValue(v)", "def set(self, name, value):\n self.__getitem__(name).clear()\n self.add(name, value)", "def __set__(self, instance, value):\n # If setting to None and there is a default value provided for this\n # field, then set the value to the default value.\n if value is None:\n if self.null:\n value = None\n elif self.default is not None:\n value = self.default\n if callable(value):\n value = value()\n\n if instance._initialised:\n try:\n value_has_changed = (\n self.name not in instance._data\n or instance._data[self.name] != value\n )\n if value_has_changed:\n instance._mark_as_changed(self.name)\n except Exception:\n # Some values can't be compared and throw an error when we\n # attempt to do so (e.g. 
tz-naive and tz-aware datetimes).\n # Mark the field as changed in such cases.\n instance._mark_as_changed(self.name)\n\n EmbeddedDocument = _import_class(\"EmbeddedDocument\")\n if isinstance(value, EmbeddedDocument):\n value._instance = weakref.proxy(instance)\n elif isinstance(value, (list, tuple)):\n for v in value:\n if isinstance(v, EmbeddedDocument):\n v._instance = weakref.proxy(instance)\n\n instance._data[self.name] = value", "def setValue(self, *args):\n return _libsbml.SpeciesFeatureValue_setValue(self, *args)", "def set(self, key, value):", "def set(self, key, value):", "def value(self, value):\n if self.value == value: # case where we are setting at the same value\n return\n if (not self.has_data) or self.is_unknown or self.is_byte:\n if not ida_bytes.patch_byte(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n elif self.is_word:\n if not ida_bytes.patch_word(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n elif self.is_dword:\n if not ida_bytes.patch_dword(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n elif self.is_qword:\n if not ida_bytes.patch_qword(self.ea, value):\n raise RuntimeError(\"Unable to patch value: {}\".format(self))\n else:\n raise RuntimeError(\"Unable to patch value: {}\".format(self))", "def _setitem_impl(self, index, obj, value):\n obj.set_value(value)\n return obj", "def setfield(self, field, value):\n self.__setitem__(field, value)", "def set_field( self, data ):\n self.val[:] = data[:]\n return", "def value(self, value):\n if value is None:\n raise ValueError(\"Invalid value for `value`, must not be `None`\")\n\n self._value = value", "def value(self, value):\n if value is None:\n raise ValueError(\"Invalid value for `value`, must not be `None`\")\n\n self._value = value", "def set(self, val):\n daskD.wait(self.client.map(_call_set, self.vecDask, val=val, pure=False))\n return self", "def value(self, value):\n if isinstance(value, (list, tuple)):\n if self.is_tag:\n value = ':'.join(value)\n else:\n log.e('Multiple values passed to non-tag node', stack=True)\n raise Exception('Multiple values passed to non-tag node')\n if value == self.__value:\n return\n self.__value = value\n #pylint: disable=protected-access\n self.root._modified = True", "def value(self, value):\n\t\toldvalue = self._value\n\t\tself._value = value\n\t\tif oldvalue != value:\n\t\t\tself.changed()", "def result(self, value):\n self.set_local(0, value)", "def setValue(self, value):\r\n # Clamp values to [0,1]\r\n self.__value = max(0, min(value, 1))", "def _setValue( self, client, value ):\n\t\treturn client.setValue( self.schema, value )", "def set_value ( self, object, value ):\n target, name = self.target_name( object )\n setattr( target, name, value )", "def set(self, key, value):\n #try to lock the tree. 
If we succeed make sure\n #we dont lose updates from any other process\n if self._storage.lock():\n self._refresh_tree_ref()\n #get current top-level node and make a value-ref\n node = self._follow(self._tree_ref)\n value_ref = ValueRef(value)\n #insert and get new tree ref\n self._tree_ref = self._insert(node, key, value_ref)\n self._tree_ref = self._blacken(self._follow(self._tree_ref))", "def set_value(self, value):\n self.value = str(value)", "def __setitem__(self, key, value):\n\n fi = self.arbor.field_info[key]\n ftype = fi.get('type')\n if ftype not in ['analysis', 'analysis_saved']:\n raise ArborUnsettableField(key, self.arbor)\n\n vector_fieldname = fi.get(\"vector_fieldname\", None)\n has_vector_field = vector_fieldname is not None\n\n if self.is_root:\n root = self\n tree_id = 0\n # if root, set the value in the arbor field storage\n self.arbor[key][self._arbor_index] = value\n if has_vector_field and vector_fieldname in self.arbor.field_data:\n del self.arbor.field_data[vector_fieldname]\n else:\n root = self.root\n tree_id = self.tree_id\n self.arbor._node_io.get_fields(self, fields=[key],\n root_only=False)\n data = root.field_data[key]\n data[tree_id] = value\n if has_vector_field and vector_fieldname in root.field_data:\n del root.field_data[vector_fieldname]", "def __setitem__(self, index, value):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # let my tile do the rest\n self.data[self.tile.offset(index)] = value\n # otherwise\n else:\n # set the item directly in my container\n self.data[index] = value\n # all done\n return", "def assign(self, value):\n generator = (bits for bits in self.data if bits.is_null)\n if isinstance(value, int):\n series_of_bits = next(generator, _bits_sentinel)\n series_of_bits.assign(value)\n elif isinstance(value, typing.Iterable):\n for value, bits in zip(value, generator):\n bits.assign(value)\n return self" ]
[ "0.7084912", "0.7043695", "0.7043695", "0.702026", "0.702026", "0.70160013", "0.70115966", "0.69155234", "0.69025266", "0.68340504", "0.6826874", "0.6826183", "0.67819715", "0.6768549", "0.67635816", "0.6759227", "0.67537683", "0.6748124", "0.6729947", "0.67173505", "0.67173505", "0.67173505", "0.6708209", "0.6672062", "0.6671675", "0.6671675", "0.6671657", "0.6669432", "0.66611236", "0.6651513", "0.66481924", "0.66323745", "0.6628233", "0.66210735", "0.6618751", "0.6618751", "0.65953946", "0.65915346", "0.65893537", "0.6571693", "0.6525544", "0.6518064", "0.65173995", "0.65171856", "0.65106267", "0.65106267", "0.65106267", "0.65106267", "0.65106267", "0.65106267", "0.65028405", "0.6490768", "0.6490148", "0.64864844", "0.6479479", "0.6445068", "0.640686", "0.63711536", "0.63701755", "0.6368628", "0.6358249", "0.63570243", "0.6347318", "0.6326249", "0.63218236", "0.6305928", "0.62916106", "0.62876946", "0.6281111", "0.62764704", "0.62621874", "0.62487304", "0.62475276", "0.6244141", "0.62436706", "0.6243285", "0.62300354", "0.6226514", "0.62239635", "0.618966", "0.61765325", "0.616703", "0.616703", "0.615682", "0.6155501", "0.6150064", "0.6143277", "0.61404765", "0.61404765", "0.61384255", "0.6134474", "0.61286163", "0.6120659", "0.6105489", "0.6098972", "0.6091024", "0.6087452", "0.60835356", "0.6080207", "0.60776144", "0.6075881" ]
0.0
-1
>>> import shutil >>> import core.docprocessor >>> basepath = 'core/test_output' >>> f = open('core/test/cv_1.doc', 'r') >>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath) >>> cv1.result True >>> ori = cv1.name >>> des = cv1.copy() >>> cv1.name == ori False >>> f.close() >>> shutil.rmtree(basepath)
def copy(self, des=None, name=None): if des is None: des = self.source_path if name is None: name = self.name location = os.path.join(des, name) while os.path.isfile(location) is True: self.base.reset_random() self.name = self.base.random name = self.name location = os.path.join(des, name) with open(location, 'wb') as f: f.write(self.stream) return location
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditional_copy(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n\n filename = save_cmake_filename(doc_file)\n\n filename1 = os.path.join(asciitest_out_dir, filename + \".temp\").replace(\"\\\\\",\"/\")\n filename2 = os.path.join(asciitest_out_dir, filename).replace(\"\\\\\",\"/\")\n\n update_if_different(filename1, filename2)", "def test_copy_without_name(self):\n self.source[\"name\"] = \"\"\n source_copy = copy_source(self.source, self.DATA_MODEL)\n self.assertEqual(\"pip (copy)\", source_copy[\"name\"])", "def copydoc(self, doc):\n dst = os.path.join(self.path, os.path.split(doc)[1])\n if not self.rc.force and os.path.isfile(dst):\n raise RuntimeError(dst + \" already exists!\")\n shutil.copy2(doc, dst)\n return dst", "def test_copy_name(self):\n source_copy = copy_source(self.source, self.DATA_MODEL)\n self.assertEqual(\"Source (copy)\", source_copy[\"name\"])", "def test_copy_without_name(self):\n self.subject[\"name\"] = \"\"\n subject_copy = copy_subject(self.subject, self.DATA_MODEL)\n self.assertEqual(\"Software (copy)\", subject_copy[\"name\"])", "def test_copy_name(self):\n subject_copy = copy_subject(self.subject, self.DATA_MODEL)\n self.assertEqual(\"Subject (copy)\", subject_copy[\"name\"])", "def deleteconvert(self):\n filename = os.path.join(self.docx_path, self.name.docx)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.html_path, self.name.html)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.docbook_path, self.name.xml)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.markdown_path, self.name.md)\n if os.path.isfile(filename):\n os.remove(filename)", "def cleanup(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n filename = os.path.join(asciitest_out_dir, save_cmake_filename(doc_file)).replace(\"\\\\\",\"/\")\n \n #print(\"cleanup %s %s\" % (doc_file, filename))\n try:\n os.remove(filename)\n except:\n pass", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def test_copy_without_name_change(self):\n source_copy = copy_source(self.source, self.DATA_MODEL, change_name=False)\n self.assertEqual(\"Source\", source_copy[\"name\"])", "def test_cpoy_clean():\n home_dir = \"/home/xyz\"\n result_root = \"tq-data01\"\n root_folder = \"sentinel1_GRD\"\n local_path = \"/home/xyz/data_pool/test_data/sentinel_GRD/77\"\n suffix_pattern = suffix_pattern = [\"/*.data\", \"/*.dim\", \"/*.SAFE\"]\n\n logger.info(\"Delete test\")\n res_path, flag = CCL.copy_clean_local(\n home_dir, result_root, root_folder, local_path, suffix_pattern\n )\n logger.info(\"%s, %s\", res_path, flag)", "def test_written(self):\n\n checkit=subprocess.run([\"python\", \"../../taxonomy/src_files/rdp_lineage_to_tax.py\", \"-i\", \"../resource_files/rdp_test_taxonomy.csv\", \"-o\", \"test_rdp_taxonomy\"], capture_output=True, text=True)\n \n # is the folder there\n self.assertTrue(os.path.exists(os.path.exists(\"../processed_files/rdp_prep_taxonomy\")))\n \n # there should be 2 files in there\n files_in_dir=os.listdir(\"../processed_files/rdp_prep_taxonomy\")\n self.assertEqual(len(files_in_dir), 2)\n \n for x in files_in_dir:\n if x.split('.')[-1]=='txt':\n taxonomy_file=x\n \n # does the test match the provided actual output\n # 
rdp_team_taxonomy_check can be found on https://github.com/rdpstaff/classifier/issues/18\n self.assertTrue(filecmp.cmp(\"../resource_files/rdp_team_taxonomy_check.txt\", \"../processed_files/rdp_prep_taxonomy/{}\".format(taxonomy_file)))\n \n shutil.rmtree(\"../processed_files/rdp_prep_taxonomy\")", "def test_copy_without_name_change(self):\n subject_copy = copy_subject(self.subject, self.DATA_MODEL, change_name=False)\n self.assertEqual(\"Subject\", subject_copy[\"name\"])", "def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def test_no_deletion(self):\n\t\tanalyse_text(self.filename)\n\t\tself.assertTrue(os.path.exists(self.filename))", "def copydocs(store, path, rc):\n for doc in rc.documents:\n dst = os.path.join(path, os.path.split(doc)[1])\n if not rc.force and os.path.isfile(dst):\n raise RuntimeError(dst + \" already exists!\")\n shutil.copy2(doc, dst)", "def copy_result(finfo):\n create_dir(finfo['resultdir'])\n try:\n shutil.copyfile(joinp(finfo['workingdir'], finfo['tempfilehash']+'.pdf.complete'),\n finfo['resultpath'])\n except IOError as e:\n finfo['errors'] += 'File copy:\\n' + e.filename + '\\n'\n shutil.rmtree(finfo['workingdir'])", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def test_object_renaming_is_not_journalized(self, browser):\n self.login(self.regular_user, browser=browser)\n create(Builder('document').within(self.empty_dossier))\n\n browser.open(self.empty_repofolder, view=\"copy_items\",\n data=self.make_path_param(self.empty_dossier))\n browser.css('#contentActionMenus a#paste').first.click()\n\n copy = self.empty_repofolder.objectValues()[0]\n\n browser.open(copy, view='tabbedview_view-journal')\n listing = browser.css('.listing').first\n self.assertEqual(\n [u'Dossier added: An empty dossier',\n u'Document added: copy of Testdokum\\xe4nt',\n u'Document added: Testdokum\\xe4nt',\n u'Dossier added: An empty dossier'],\n [row.get('Title') for row in listing.dicts()])", "def test_deleteInput(self):\n input1 = self.getArbitraryLoreInput(0)\n self.howtoDir.child(\"one.xhtml\").setContent(input1)\n self.builder.build(\"whatever\", self.howtoDir, self.howtoDir,\n self.templateFile, deleteInput=True)\n self.assertTrue(self.howtoDir.child('one.html').exists())\n self.assertFalse(self.howtoDir.child('one.xhtml').exists())", "def test_copy(self):\n\n tempdir = tempfile.mkdtemp()\n include_example = os.path.join(here, 'include-example.ini')\n manifest = ManifestParser(manifests=(include_example,))\n manifest.copy(tempdir)\n self.assertEqual(sorted(os.listdir(tempdir)),\n ['fleem', 'include', 'include-example.ini'])\n self.assertEqual(sorted(os.listdir(os.path.join(tempdir, 'include'))),\n ['bar.ini', 'crash-handling', 'flowers', 'foo.ini'])\n from_manifest = ManifestParser(manifests=(include_example,))\n to_manifest = os.path.join(tempdir, 'include-example.ini')\n to_manifest = ManifestParser(manifests=(to_manifest,))\n self.assertEqual(to_manifest.get('name'), from_manifest.get('name'))\n shutil.rmtree(tempdir)", "def test_copy_dtm(self):\n new_dtm = self.dtm1.copy()\n 
self.assert_is_copy(new_dtm, self.dtm1)", "def tokenize(self):\n self.__create_tokens()\n copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)\n if self.__copy:\n copy_obj.copy_file(self.__write_to, \"tokenize.data\")\n copy_obj.rename(self.__write_to, self.__file)\n os.remove(self.__write_to)", "def process(self, source, dest):\n\n if os.path.isfile(dest):\n print(\"File %s exists -> aborting\" % dest)\n exit(1)\n print(dest)\n \n fin = open(source)\n fout = open(dest, 'w')\n for l in fin.readlines():\n l = l.replace(\"AUTHOR\", self.author)\n l = l.replace(\"DESCRIPTION\", self.description)\n l = l.replace(\"NAMESPACE\", self.namespace)\n l = l.replace(\"MyComponent\", self.className)\n l = l.replace(\"INCDIR\", self.hDir)\n l = l.replace(\"CXXDIR\", self.cxxDir)\n l = l.replace(\"YEAR\", str(self.now.year))\n l = l.replace(\"DATE\", \"%d %s %d\" % (self.now.day, self.now.strftime(\"%b\"), self.now.year))\n fout.write(l)\n fout.close()\n fin.close()", "def delete_original( self ):\n try:\n os.remove( self.PATH_TO_SOURCE_FILE )\n copy_check = utility_code.checkFileExistence( self.PATH_TO_SOURCE_FILE ) # should not exist\n if copy_check == 'exists':\n message = 'deletion of original file at ```%s``` failed, as determined by copy_check' % self.PATH_TO_SOURCE_FILE\n log.error( message )\n sys.exit( message )\n else:\n log.info( 'deletion successful of original file at ```%s```' % self.PATH_TO_SOURCE_FILE )\n except Exception, e:\n message = 'deletion of original file at ```%s``` failed; exception, `%s`' % ( self.PATH_TO_SOURCE_FILE, unicode(repr(e)) )\n log.error( message )\n sys.exit( message )\n return", "def test_file_by_filename(self, mock_move):\n\n # Mock the move function so we don't actually end up filing\n p = P.PyPDFOCR()\n cwd = os.getcwd()\n filename = os.path.join(\"pdfs\", \"test_super_long_keyword.pdf\")\n out_filename = filename.replace(\".pdf\", \"_ocr.pdf\")\n\n if os.path.exists(out_filename):\n os.remove(out_filename)\n\n print(\"Current directory: %s\" % os.getcwd())\n #opts = [filename, \"--config=test_pypdfocr_config.yaml\", \"-f\"]\n opts = [filename, \"--config=test_pypdfocr_config_filename.yaml\", \"-f\", \"-n\"]\n p.go(opts)\n\n assert(os.path.exists(out_filename))\n os.remove(out_filename)\n\n calls = [call(out_filename, os.path.abspath(os.path.join('temp', 'target','recipe', os.path.basename(out_filename))))]\n mock_move.assert_has_calls(calls)", "def copy(self):\r\n ret=' '\r\n if self.REQUEST.SESSION.has_key('my_path'):\r\n\t zpath=self.REQUEST.SESSION['my_path'].replace('toolbox_root','').strip('/')\r\n\t #ret=zpath\r\n\t if self.REQUEST.SESSION.has_key('copy_bild'):\r\n\t\t cp_bild=self.REQUEST.SESSION['copy_bild'].split('/')[-1].strip('/')\r\n\t\t cp_path=str('/').join(self.REQUEST.SESSION['copy_bild'].split('/')[0:-1])\r\n\t\t #ret+=' '+cp_path+' '+cp_bild\r\n\t\t if cp_path!=zpath:\r\n\t\t \tn_id=search_id(self,self.restrictedTraverse(zpath).objectValues('Image'))\r\n\t\t \t#ret+=' '+n_id\r\n\t\t\tfor x in liste_val:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tfor obj in self.restrictedTraverse(cp_path).objectValues('Image'):\r\n\t\t\t\t\t if str(obj.getId())[0:6]==cp_bild:\r\n\t\t\t\t\t\tmy_clip=self.restrictedTraverse(cp_path).manage_copyObjects([obj.getId()])\r\n\t\t\t\t\t\tcopied=self.restrictedTraverse(zpath).manage_pasteObjects(my_clip)\r\n\t\t\t\t\t\t#ret+=' new id : '+str(copied[0]['new_id'])\r\n\t\t\t\t\t\t#if 
str(copied[0]['new_id']).split('_')[0]!=n_id:\r\n\t\t\t\t\t\t#\tself.restrictedTraverse(zpath).manage_renameObjects([str(copied[0]['new_id'])],[str(n_id+x)])\r\n\t\t\t\t\t\t\t#ret +=' False '\r\n\t\t\t\t\t\t#ret+='<br>\\n'\r\n\t\t\t\texcept:\r\n\t\t\t\t\tret+=''\r\n else:\r\n\t ret=' '\r\n return ' '", "def fileCopyToMorph():\r\n print(str(self.copyFilePath))\r\n print(str(self.morphPath))\r\n \"\"\"copyPath = self.createDir + self.name + \"-\" + self.method\r\n print(str(copyPath))\r\n \r\n os.system(copyPath)\"\"\"\r\n os.system(self.copyFilePath)\r\n print(\"Burada sorun yok\")", "def copy(self, src_path: str, tgt_path: str) -> None:", "def test_no_delete(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def copy_docs():\n local('rsync -av --delete --exclude=.svn %s:%s/ /tmp/djangodocs/' %\n (env.hosts[0], env.deploy_base.child('docbuilds')))", "def test_copy_name(self):\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Metric (copy)\", metric_copy[\"name\"])", "def copy_to_ocr(doc_dict):\n try:\n\n # check if document directory in OCR input directory exists\n if not os.path.exists(os.path.join(config.TOC_OCR_IN, doc_dict['name'])):\n # create missing directories\n os.makedirs(os.path.join(config.TOC_OCR_IN, doc_dict['name']))\n except:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Failed to create directory {os.path.join(config.TOC_OCR_IN,doc_dict['name'])} in {config.TOC_OCR_IN}...\")\n\n for item in doc_dict['toc']:\n\n # check if file referenced in dictionary is really in the document root directory\n if not os.path.isfile(item):\n\n # raise an exception if the isn't\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): File {item} is not in the document directory {doc_dict['path']}...\")\n\n try:\n # copy file to document directory in OCR input directory\n shutil.copy2(src=item, dst=os.path.join(config.TOC_OCR_IN, doc_dict['name']))\n except:\n\n # raise exception if error occurs during copying\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Failed to copy {item} to {os.path.join(config.TOC_OCR_IN, doc_dict['name'])}...\")", "def decompile():\n #list of files to decompile and results decompile\n dataprocessor_files = []\n\n #list of files to decompile and results decompile for 1C v7.7\n dataprocessor_files_v7 = []\n\n #list of files to decompile and results decompile for 1C MD\n dataprocessor_files_MD = []\n\n #set the exit code\n exit_code = 0\n\n #Find datapocessor files\n for filename in get_list_of_comitted_files():\n #Check the file extensions\n logging.info(\"file to check %s\" % filename)\n if filename[-3:] == \"ert\":\n dataprocessor_files_v7.append(filename)\n logging.info(\"file %s\" % filename)\n continue \n if filename[-3:] in ['.MD','.md']:\n dataprocessor_files_MD.append(filename)\n logging.info(\"file %s\" % filename)\n continue \n\n dirsource = os.path.abspath(os.path.join(os.path.curdir, \"src\"))\n curabsdirpath = os.path.abspath(os.path.curdir) \n\n if len(dataprocessor_files) > 0:\n #pathbin1c = \"C:\\\\Program Files\\\\1cv82\\8.2.17.153\\\\bin\\\\1cv8.exe\"\n #pathbin1c = \"c:\\\\Program Files (x86)\\\\1cv8\\\\8.3.4.304\\\\bin\\\\1cv8.exe\"\n pathbin1c = get_path_to_1c()\n\n if len(dataprocessor_files_v7) > 0:\n for filename in dataprocessor_files_v7:\n print(\"ert file %s\" % filename)\n #TODO: добавить копирование этих же файлов в каталог src/имяфайла/...\n #get file name.\n fullpathfile = 
os.path.abspath(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n fullbasename = os.path.basename(filename)\n newdirname = os.path.dirname(filename)\n\n print(\"ert file %s\" % fullpathfile )\n\n #Скопируем сначало просто структуру каталогов.\n if not os.path.exists(dirsource):\n os.makedirs(dirsource)\n #для каждого файла определим новую папку.\n newsourcepath = os.path.join(dirsource, newdirname)\n newpath2 = os.path.join(newsourcepath, basename)\n if not os.path.exists(newsourcepath):\n logging.info(\"create new dir %s\" % newsourcepath)\n os.makedirs(newsourcepath)\n #print(\"curabsdirpath %s\" % curabsdirpath)\n #print(\"newpath2 %s\" % newpath2)\n #print(\"basename %s\" % basename)\n\n t1 = format(\"gcomp -q -d -F %s -D %s -v --no-ini --no-version --no-empty-mxl\" % (filename, newsourcepath))\n result = subprocess.check_call(['cmd.exe', '/C', t1]) \n #изменим кодировку cp1251 на utf-8 \n #утилита iconv.exe должна запускаться в cmd = добавлена в PATH\t\t\t\n #файлов 1s, mdp, frm, txt\n t3 = 'bash .git/hooks/convert_utf8.sh {0}'.format( newpath2 )\n print(\"t3 = %s\" % t3)\n logging.info(\"CONVERT: %s\" % t3)\n result = subprocess.check_call(['cmd.exe', '/C', t3])\n #result = subprocess.check_call(['git', 'add', '--all', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.1s', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.frm', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.mxl', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.utf', newsourcepath])\n if not result == 0:\n logging.error(result)\n exit(result)\n\n if len(dataprocessor_files_MD) > 0:\n for filename in dataprocessor_files_MD:\n print(\"MD file %s\" % filename)\n #TODO: добавить копирование этих же файлов в каталог src/имяфайла/...\n #get file name.\n fullpathfile = os.path.abspath(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n fullbasename = os.path.basename(filename)\n newdirname = os.path.dirname(filename)\n \n #Скопируем сначало просто структуру каталогов.\n if not os.path.exists(dirsource):\n os.makedirs(dirsource)\n #для каждого файла определим новую папку.\n newsourcepath = os.path.join(dirsource, newdirname, \"MD\")\n if not os.path.exists(newsourcepath):\n logging.info(\"create new dir %s\" % newsourcepath)\n os.makedirs(newsourcepath)\n newpath2 = os.path.join(newsourcepath, basename)\n print(\"fullbasename %s\" % fullbasename)\n print(\"newdirname %s\" % newdirname)\n print(\"newsourcepath %s\" % newsourcepath)\n \n t1 = format(\"gcomp -d -v -F %s -D %s\" % (filename, newsourcepath))\n result = subprocess.check_call(['cmd.exe', '/C', t1])\n\n #изменим кодировку cp1251 на utf-8 \n #утилита iconv.exe должна запускаться в cmd = добавлена в PATH\t\t\t\n #файлов 1s, mdp, frm, txt\n t3 = 'bash .git/hooks/convert_utf8.sh {0}'.format( newsourcepath )\n print(\"t3 = %s\" % t3)\n logging.info(\"CONVERT: %s\" % t3)\n result = subprocess.check_call(['cmd.exe', '/C', t3])\n\n #result = subprocess.check_call(['git', 'add', '--all', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.1s', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.frm', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.mxl', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.utf', newsourcepath])\n if not result == 0:\n logging.error(result)\n exit(result)", "def test_upload_area_cleanup(self):\n vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'\n p = 
Path('import')\n files = list(p.glob('**/urn:mrn:s124:*'))\n for item in files:\n print(item)\n os.remove(str(item))\n pass", "def copy_file_check(self):\n pass", "def test03_add_cv_first_student_with_admin(self):\n actual_name_file = self.students_page.\\\n click_edit_students_list_button().\\\n click_add_new_student_button().\\\n add_cv(data['path_file_cv']).\\\n get_name_cv_file()\n self.assertEqual(actual_name_file,\n data['expected_name_file_cv'])", "def _doc_to_txt(file_path, dst_dir, file_name):\n if file_name is None:\n file_name = os.path.split(file_path)[1]\n file_dst = os.path.join(dst_dir, re.sub(r'\\.doc$', '.txt', file_name))\n with open(file_dst, 'w') as f:\n return subprocess.call([\"antiword\", file_path, \">\", file_dst],\n stdout=f)", "def move_ocr_results(doc_dict):\n # get OCR result files from OCR output directory\n result_files = os.listdir(os.path.join(config.TOC_OCR_OUT, doc_dict['name']))\n if len(result_files) == 0:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Result files not found in {os.path.join(config.TOC_OCR_OUT, doc_dict['name'])}...\")\n\n for item in result_files:\n try:\n\n # check if does not yet exist in document root directory\n if not os.path.isfile(os.path.join(doc_dict['path'], item)):\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): Copying {os.path.join(config.TOC_OCR_OUT, doc_dict['name'], item)} to {doc_dict['path']}...\")\n\n # copy the output files if they are not in the document root directory\n shutil.copy2(src=os.path.join(config.TOC_OCR_OUT,doc_dict['name'], item), dst=doc_dict['path'])\n\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} WARNING (OCR): File {item} is already in the directory {doc_dict['path']}...\")\n except:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Failed to copy result file {item} to {doc_dict['path']}...\")", "def test_update_content_copy(self):\n # add same content copy twise, there should be no duplication\n fpath_1 = self.temp_f_1.name\n fm_1 = content.Format.objects.using(self.the_channel_id).get(format_size=102)\n fm_3 = content.Format.objects.using(self.the_channel_id).get(format_size=46)\n file_1 = content.File.objects.using(self.the_channel_id).get(format=fm_1)\n api.update_content_copy(file_1, fpath_1)\n file_3 = content.File.objects.using(self.the_channel_id).filter(format=fm_3)[1]\n api.update_content_copy(file_3, fpath_1)\n self.assertEqual(1, len(os.listdir(settings.CONTENT_COPY_DIR+'/0/9/')))\n\n # swap the content copy in file_3\n fpath_2 = self.temp_f_2.name\n self.assertEqual(file_3.extension, '.pdf')\n api.update_content_copy(file_3, fpath_2)\n self.assertEqual(file_3.extension, '.mp4')\n\n # because file_3 and file_2 all have reference pointing to this content copy,\n # erase the reference from file_2 won't delete the content copy\n fm_2 = content.Format.objects.using(self.the_channel_id).get(format_size=51)\n file_2 = content.File.objects.using(self.the_channel_id).get(format=fm_2)\n api.update_content_copy(file_2, fpath_2)\n self.assertTrue(file_2.content_copy)\n api.update_content_copy(file_2, None)\n self.assertFalse(file_2.content_copy)\n content_copy_path = settings.CONTENT_COPY_DIR+'/3/3/335782204c8215e0061516c6b3b80271.mp4'\n self.assertTrue(os.path.isfile(content_copy_path))\n\n # all reference pointing to this content copy is gone,\n # the content copy should be deleted\n api.update_content_copy(file_3, None)\n self.assertFalse(os.path.isfile(content_copy_path))\n 
self.assertFalse(file_2.content_copy)\n self.assertFalse(file_2.checksum)\n\n # update None content copy on empty File object should be silent and have no effect\n api.update_content_copy(file_2, None)\n\n # test File __str__ method\n self.assertEqual(file_1.__str__(), '09293abba61d4fcfa4e3bd804bcaba43.pdf')\n\n # test MimeType __str__ method\n self.assertEqual(fm_1.mimetype.__str__(), 'video_high')\n\n # test for non File object exception\n with self.assertRaises(TypeError):\n api.update_content_copy(None, None)", "def test_copy_title(self):\n report_copy = copy_report(self.report, self.DATA_MODEL)\n self.assertEqual(\"Report (copy)\", report_copy[\"title\"])", "def preprocess(self):\n os.system(\"cp \" + self.input + \" \" + self.output)\n return self.output", "def test_postrun_normal(self):\n from scripts.create_pointer_file import postrun\n\n postrun(projectFile=\"/tmp/myproject.prproj\",projectFileExtension=\".prproj\",\n dataCache={'created_asset_folder':\"/path/to/my/assets\"})\n\n self.assertTrue(os.path.exists(\"/tmp/myproject.ptr\"))\n with open(\"/tmp/myproject.ptr\") as f:\n content = f.read()\n self.assertEqual(content,\"/path/to/my/assets\")\n\n os.unlink(\"/tmp/myproject.ptr\")", "def test_doNotDeleteInput(self):\n input1 = self.getArbitraryLoreInput(0)\n self.howtoDir.child(\"one.xhtml\").setContent(input1)\n self.builder.build(\"whatever\", self.howtoDir, self.howtoDir,\n self.templateFile)\n self.assertTrue(self.howtoDir.child('one.html').exists())\n self.assertTrue(self.howtoDir.child('one.xhtml').exists())", "def removeInputCopies(self):\n for p in self.assoc.parlist:\n if int(p['group']) == 1:\n _img = p['image'].datafile\n shutil.move(p['orig_filename'],_img)", "def removeFilenameValidate(call, args=(), kwargs={}, nodeClass='Write'):", "def test_clipboard(self):\n def compare_text(clipboard, text, expected_text):\n self.compare_result = False\n self.compare_result = text == expected_text\n name = self.vimiv.get_pos(True)\n basename = os.path.basename(name)\n abspath = os.path.abspath(name)\n clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n primary = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)\n # Copy basename and abspath to clipboard\n self.vimiv[\"fileextras\"].copy_name(False)\n # Check if the info message is displayed correctly\n self.check_statusbar(\"INFO: Copied \" + basename + \" to clipboard\")\n clipboard.request_text(compare_text, basename)\n self.assertTrue(self.compare_result)\n self.vimiv[\"fileextras\"].copy_name(True)\n clipboard.request_text(compare_text, abspath)\n self.assertTrue(self.compare_result)\n # Toggle to primary and copy basename\n self.vimiv[\"fileextras\"].toggle_clipboard()\n self.vimiv[\"fileextras\"].copy_name(False)\n primary.request_text(compare_text, basename)\n self.assertTrue(self.compare_result)\n # Toggle back to clipboard and copy basename\n self.vimiv[\"fileextras\"].toggle_clipboard()\n self.vimiv[\"fileextras\"].copy_name(False)\n clipboard.request_text(compare_text, basename)\n self.assertTrue(self.compare_result)", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def copy(self, target):\r\n py.process.cmdexec(\"svn copy %s %s\" %(str(self), str(target)))", "def sysFile(*args, copy: AnyStr=\"\", delete: bool=True, makeDir: bool=True, move: AnyStr=\"\",\n removeEmptyDir: bool=True, rename: AnyStr=\"\", **kwargs)->bool:\n pass", "def test_RemovalCandidate_instantiation():\n f = tempfile.mkstemp()\n #f[1] is 
the absolute pathname.\n rc = r.RemovalCandidate(f[1])\n assert_equal(rc.path,f[1])\n assert_is_instance(rc.mtime,float)\n assert_equal(rc.size,0)\n os.remove(f[1])", "def test_deletion2(engine_contents, engine_locations):\n file_name = 'Triangle.java.xml'\n new_contents = copy.deepcopy(engine_contents)\n new_locations = copy.deepcopy(engine_locations)\n target = (file_name, 'expr_stmt', 0)\n assert XmlEngine.do_delete(engine_contents, engine_locations, new_contents, new_locations, target)\n assert not XmlEngine.do_delete(engine_contents, engine_locations, new_contents, new_locations, target)", "def test_duplicate(self):\n test_file = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-225-1-0.mdd')\n\n mdd.procall([test_file])\n \n self.compare_node58()", "def bless_output(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if path.exists(expected_output_file):\n os.unlink(expected_output_file)\n os.rename(actual_output_file, expected_output_file)", "def delete_file(self, name, container):\r\n try:\r\n cnt = self.get_container(container)\r\n obj = cnt.get_object(name)\r\n obj.delete()\r\n return True\r\n except:\r\n return False", "def test_writer_with_file():\n outputfile = \"testfile.txt\"\n GCMT(write=outputfile)\n assert os.path.exists(outputfile)\n os.remove(outputfile)", "def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")", "def copy_and_mark_for_cleanup(self, path):\n filename = os.path.split(path)[1]\n\n shutil.copyfile(path, filename)\n self.add_file_to_clean(filename)\n\n return filename", "def copy_source_files(or_dir,template_dir): \n def copy_sc(file,fpA,fpB):\n fpA = os.path.join(fpA,file)\n if os.path.isfile(fpA):\n shutil.copy(fpA,fpB)\n else:\n raise Exception(\"Error: File '{}' is missing\".format(file))\n return\n \n copy_sc('imta_core.sty',or_dir,template_dir)\n copy_sc('imta_extra.sty',or_dir,template_dir)\n copy_sc('imta_logo.pdf',or_dir,template_dir)\n copy_sc('imta_documentation.tex',or_dir,template_dir)\n print('Template files copied at {}'.format(template_dir))", "def simple_copy():\n src, des = rem('grab')\n # All we need to do is keep the filename same\n # Since the file is of 0 bytes\n des_name = os.path.basename(des)\n des_dir = os.path.dirname(des)\n\n des = os.path.join(des_dir, des_name)\n\n # Now simply open and close des in write mode\n TEMP_STREAM = open(des, 'w')\n TEMP_STREAM.close()\n\n return True", "def test_copy_features(self):\n fc = self.read_feature()\n other = FeatureCollection(features=fc.features,\n otherProperties=fc.otherProperties)\n assert len(other.features) == 1\n feature = other.features[0]\n\n self.check_feature(feature)", "def wp_fp_rm(self,url):\r\n\t\tcontent = self.get_cont(url+\"/readme.html\")\r\n\t\tregex = re.compile(r'Version (.+)')\r\n\t\tres = self.copy(regex.findall(content))\r\n\t\treturn res[0]", "def process_cloned_file(old_name, data):\n # Decide what is a far_child\n base_name = old_name.split(CHILD_TPL_FLAG)[0]\n base_dir = dirname(old_name)\n cache = []\n for tfile in listdir(base_dir):\n tfile = join(base_dir, tfile)\n if isfile(tfile) and tfile.startswith(base_name):\n cache.append(tfile)\n far_child = sorted(cache)[-1]\n # Actual rendering\n if far_child != old_name:\n return False\n jinja_env = Environment(loader=FileSystemLoader(\"/\"))\n info = jinja_env.get_template(old_name).render(data)\n for tfile in cache:\n remove(tfile)\n new_name = Template(base_name).render(data)\n with open(new_name, \"w\") as 
fil:\n fil.write(info)\n return True", "def copySpecial():\n depNode = nuke.dependencies(nuke.selectedNode())\n dependNode = nuke.dependentNodes(nuke.INPUTS or nuke.HIDDEN_INPUTS or nuke.EXPRESSIONS, [nuke.selectedNode()])\n i = 0\n if dependNode[0].Class() in ['Scene', 'MergeGeo']:\n i = nuke.inputs(dependNode[0])+1\n\n nuke.nodeCopy(nukescripts.cut_paste_file())\n\n for node in nuke.allNodes():\n node['selected'].setValue(0)\n\n nuke.nodePaste(nukescripts.cut_paste_file())\n\n newNode = nuke.selectedNode()\n newNode.setInput(0, depNode[0])\n dependNode[0].setInput(i+1, newNode)", "def _copy_file ( self, source, dest ):\n return", "def clone( m, orig):\r\n if m.ObjType not in (1, 6): return\r\n if not orig: return\r\n \r\n if m.ObjType == 6: # Target is a Folder\r\n if orig.ObjType == 6: cloned = m.CopyFolderDisp( orig) # Orig is Folder too\r\n else: cloned = m.CopyFCODisp( orig) # Orig is FCO\r\n elif m.ObjType == 1:\r\n cloned = m.CopyFCODisp( orig, metaRole( orig)) # Target is Model, Orig is FCO\r\n \r\n if cloned:\r\n \tcloned.Name = \"Cloned\" + orig.Name\r\n return cloned", "def combine_documents(path=os.path.join(os.curdir, \"data/processed\"), name='corpus.txt'):\n outname=os.path.join(path, name)\n if os.path.exists(outname):\n os.remove(outname)\n filenames = [f for f in os.listdir(path) if fnmatch.fnmatch(f, '*.txt')]\n with open(outname, 'w') as outfile:\n print \"Combining documents...\"\n for fname in filenames:\n print fname\n with open(os.path.join(path, fname)) as infile:\n outfile.write(infile.read())", "def test_docdir(self):\n self.chck_triple('docdir')", "def test_prepare_for_submission(vasp2w90_calc_and_ref, sandbox_folder):\n vasp_calc, reference = vasp2w90_calc_and_ref\n with pytest.raises(NotImplementedError):\n vasp_calc.prepare_for_submission(sandbox_folder)\n with managed_temp_object() as temp_object:\n vasp_calc.write_incar(temp_object)\n with open(temp_object, 'r', encoding='utf8') as result_incar_fo:\n assert result_incar_fo.readlines() == reference['incar']", "def clean_PDF(submission):\n src = submission.file_upload.file.name\n pdf1 = PdfFileReader(src)\n merger = PdfFileMerger(strict=False, )\n merger.append(pdf1, import_bookmarks=False)\n merger.addMetadata({'/Title': '',\n '/Author': '',\n '/Creator': '',\n '/Producer': ''})\n fd, temp_file = tempfile.mkstemp(suffix='.pdf')\n merger.write(temp_file)\n merger.close()\n os.close(fd)\n shutil.move(temp_file, src) # replace the original PDF on the server", "def delete(self,result):\n path = self.get_archive_file_path(result) if isinstance(result,RunResults) else result\n if os.path.exists(path):\n os.remove(path)", "def delete_file(self):\n os.remove(self.id+\"-input.txt\")\n if(self.lang == \"PYTHON\"):\n os.remove(self.id+\".py\")\n elif(self.lang == \"C\"):\n os.remove(self.id+\".c\")\n if(self.status == 1):\n os.remove(self.id+\"_c\")\n elif(self.lang == 'CPP'):\n os.remove(self.id+\".cpp\")\n if(self.status == 1):\n os.remove(self.id+\"_cpp\")\n elif(self.lang == 'JAVA'):\n os.remove(self.id+\".java\")\n if(self.status == 1):\n os.remove(self.id+\"_java\") \n elif(self.lang == \"JS\"):\n os.remove(self.id+\".js\")\n # if(self.status == 1):\n # os.remove(self.id+\"_js\")s", "def makeTestProcessor(test_processor_path):\r\n\r\n className = splitext(basename(test_processor_path))[0]\r\n\r\n with open(test_processor_path, 'w') as f:\r\n f.write(\"\"\"\\\r\n'''\r\nTest processor class - should be deleted upon completion of test\r\n'''\r\n\r\n'''___Built-In Modules___'''\r\nimport sys\r\nfrom os.path import 
dirname\r\n\r\n'''___Third-Party Modules___'''\r\n\r\n'''___NPL Modules___'''\r\ndataProcessing_directory = dirname(dirname(__file__))\r\nsys.path.append(dataProcessing_directory)\r\nfrom AbstractProcessor import AbstractProcessor\r\n\r\nclass %s(AbstractProcessor):\r\n processor_directory = dirname(__file__)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\"\"\" % (className))\r\n\r\n return 0", "def duplicateClean(obj=None, name=None):\n #Delete Unnecessary 'Orig' Nodes\n import fnmatch\n #duplicate obj\n if obj is None:\n obj = pm.ls(sl=1)[0]\n if name ==None:name = None\n dup = pm.duplicate(obj, n = name)[0]\n cleanUpAttr(sel=[dup],listAttr=['tx','ty','tz','rx','ry','rz','sx','sy','sz'],l=0,k=1,cb=0)\n nodes = pm.ls(dup,dag=1)\n for obj in nodes:\n if fnmatch.fnmatch(obj.name(),'*Orig*'):\n if len(pm.listConnections(obj))==0:\n pm.delete( obj)\n print 'delete unused node \"' +obj+'\" from this scene'\n return dup", "def testExampleFileGeneration(ref):\n outdir = ref.tmp_dir\n outpath = os.path.join(outdir, 'file_result.html')\n generate_file(outpath)\n ref.assertTextFileCorrect(outpath, 'file_result.html',\n ignore_substrings=['Copyright', 'Version'])", "def test_extract_pdf_prev():\n\n test_pdf_path = 'tests/files/research/fea48178ffac3a42035ed27d6e2b897cb570cf13.pdf'\n text = pdf_util.extract_pdf_text_prev(test_pdf_path)\n\n assert text\n assert \"Yoshiyuki\" in text", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def _generate_copy_target(self, src: 'mesonlib.FileOrString', output: Path) -> None:\n if isinstance(src, File):\n instr = src.absolute_path(self.environment.source_dir, self.environment.build_dir)\n else:\n instr = src\n elem = NinjaBuildElement(self.all_outputs, [str(output)], 'COPY_FILE', [instr])\n elem.add_orderdep(instr)\n self.add_build(elem)", "def testMoveAndCopyFile(self):\n try:\n remoteLocator = self.__pathPdbxDictionaryFile\n fn = self.__fileU.getFileName(remoteLocator)\n # _, fn = os.path.split(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n # Test copy file\n dPath2 = os.path.join(self.__workPath, \"tdir\")\n ok = self.__fileU.mkdir(dPath2)\n self.assertTrue(ok)\n lPath2 = os.path.join(dPath2, fn)\n ok = self.__fileU.put(lPath, lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertTrue(ok)\n # Remove copied file (to test moving file next)\n ok = self.__fileU.remove(lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertFalse(ok)\n # Test move file\n ok = self.__fileU.replace(lPath, lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertFalse(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertTrue(ok)\n # Now clean up files and dirs\n ok = self.__fileU.remove(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.remove(dPath2)\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", 
str(e))\n self.fail()", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def test_copy_pickle(self):\n\n # Test that we can pickle and unpickle\n # We force a pattern that contains all custom types:\n # `Selector`, `NullSelector`, `SelectorTag`, `SelectorAttribute`,\n # `SelectorNth`, `SelectorLang`, `SelectorList`, and `Namespaces`\n p1 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}\n )\n sp1 = pickle.dumps(p1)\n pp1 = pickle.loads(sp1)\n self.assertTrue(pp1 == p1)\n\n # Test that we pull the same one from cache\n p2 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}\n )\n self.assertTrue(p1 is p2)\n\n # Test that we compile a new one when providing a different flags\n p3 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}, flags=0x10\n )\n self.assertTrue(p1 is not p3)\n self.assertTrue(p1 != p3)\n\n # Test that the copy is equivalent, but not same.\n p4 = copy.copy(p1)\n self.assertTrue(p4 is not p1)\n self.assertTrue(p4 == p1)\n\n p5 = copy.copy(p3)\n self.assertTrue(p5 is not p3)\n self.assertTrue(p5 == p3)\n self.assertTrue(p5 is not p4)", "def test_single_file_upgma(self):\r\n\r\n titles = ['hi', 'ho']\r\n distdata = numpy.array([[0, .5], [.5, 0.]])\r\n fd, fname = mkstemp(prefix='upgma_', suffix='.txt')\r\n close(fd)\r\n f = open(fname, 'w')\r\n self._paths_to_clean_up.append(fname)\r\n f.write(format_distance_matrix(titles, distdata))\r\n f.close()\r\n\r\n fd, fname2 = mkstemp(prefix='upgma_', suffix='.txt')\r\n close(fd)\r\n self._paths_to_clean_up.append(fname2)\r\n single_file_upgma(fname, fname2)\r\n assert(os.path.exists(fname2))", "def clean_pdf():\n xnt.build.tex.clean(path=\"./\", remove_pdf=True)", "def test_merge(self):\r\n filename = os.path.join(CONFIGURATION.source_messages_dir, random_name())\r\n generate.merge(CONFIGURATION.source_locale, target=filename)\r\n self.assertTrue(os.path.exists(filename))\r\n os.remove(filename)", "def test_write(self):\n cases = {\n self.test_eac + \"NE00401.xml\": True,\n self.test_eac + \"NE01501.xml\": False,\n self.test_eac + \"NE01302.xml\": True,\n }\n metadata_url = 'http://www.example.com/metadata.xml'\n presentation_url = 'http://www.example.com/presentation.html'\n for case in cases:\n doc = EacCpf.EacCpf(case, metadata_url, presentation_url)\n self.assertNotEqual(doc, None)\n path = doc.write(self.temp)\n self.assertEquals(os.path.exists(path), True)\n # read the file and try to extract the attributes\n try:\n tree = etree.parse(path)\n ns = {\n EacCpf.DOC_KEY: EacCpf.DOC_NS,\n EacCpf.ESRC_KEY: EacCpf.ESRC_NS,\n }\n # get the url to the metadata file\n metadata = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":metadata\", namespaces=ns)\n self.assertNotEqual(metadata, None)\n self.assertEqual(metadata[0], metadata_url)\n # get the url to the presentation file\n presentation = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":presentation\", namespaces=ns)\n self.assertNotEqual(presentation, None)\n self.assertEqual(presentation[0], presentation_url)\n # get the url to the source file\n source = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":source\", namespaces=ns)\n 
self.assertNotEqual(source, None)\n self.assertEqual(source[0], case)\n except:\n msg = \"Failed to complete parsing of {0}\".format(case)\n self.log.error(msg, exc_info=True)\n self.fail(msg)", "def subtract_model(self, outfile, del_script=True):\n os.system('cp -r {} {}'.format(self.ms, outfile)) \n ct.subtract_model(outfile, delete=del_script)", "def test_commonfs_truecase():\n f1 = tempfile.mkstemp()\n f2 = tempfile.mkstemp()\n rc1 = r.RemovalCandidate(f1[1])\n rc2 = r.RemovalCandidate(f2[1])\n assert r.commonfs([rc1,rc2])", "def cleanup(job, tempOutputFileStoreID, outputFile, cores=1, memory=sortMemory, disk=\"3G\"):\n fileName = job.fileStore.readGlobalFile(tempOutputFileStoreID)\n shutil.copyfile(fileName, outputFile)\n job.fileStore.logToMaster(\"Finished copying sorted file to output: %s\" % outputFile)", "def test_read_delete(self):\n\n expected = \"Hello, World! This has been written by Fun Ilrys.\"\n File(\"hi\").write(expected)\n actual = File(\"hi\").read()\n\n self.assertEqual(expected, actual)\n\n expected = False\n File(\"hi\").delete()\n actual = PyFunceble.path.isfile(\"hi\")\n\n self.assertEqual(expected, actual)", "def dummy(doc):\r\n return doc", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def testDetermineDestNoFileIn(self):\n # Reset the members list\n cdl_convert.ColorCollection.reset_members()\n\n # Create a few Collections\n cdl_convert.ColorCollection()\n cdl_convert.ColorCollection()\n cdl_convert.ColorCollection()\n\n # The 4th one will be the one we use\n self.node = cdl_convert.ColorCollection()\n\n # But we'll create a 5th.\n cdl_convert.ColorCollection()\n\n self.node.type = 'ccc'\n\n self.node.determine_dest('./converted/')\n\n self.assertEqual(\n './converted/color_collection_003.ccc',\n self.node.file_out\n )", "def test_mbd():\n example.control.mbd_comparison('mbd_comparison.pdf')\n assert path.isfile('mbd_comparison.pdf')", "def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc", "def test_copy_without_name(self):\n self.metric[\"name\"] = \"\"\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Security warnings (copy)\", metric_copy[\"name\"])", "def test_copy(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test_copy\")\n annot1 = s.add_annotation(Feature, \"exon\", \"annot1\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"annot2\", [(10, 14)])\n got = s.copy()\n got_annot1 = got.get_annotations_matching(\n annotation_type=\"exon\", name=\"annot1\"\n )[0]\n got_annot2 = got.get_annotations_matching(\n annotation_type=\"exon\", name=\"annot2\"\n )[0]\n self.assertIsNot(got, s)\n self.assertIsNot(got_annot1, annot1)\n self.assertIsNot(got_annot2, annot2)\n self.assertEqual(got.name, s.name)\n self.assertEqual(got.info, s.info)\n self.assertEqual(got._seq, s._seq)\n 
self.assertEqual(got.moltype, s.moltype)\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)", "def cpr(src, dst):\n shutil.copytree(src, dst)", "def test_copy_delete_file(tmp_path: Path) -> None:\n sample_file = Path(__file__).parent.joinpath(\"sample.txt\")\n\n sample_file_tmp = tmp_path.joinpath(\"sample.txt\")\n assert not os.path.exists(sample_file_tmp)\n\n shutil.copyfile(sample_file, sample_file_tmp)\n assert os.path.isfile(sample_file_tmp)\n # pathlib.Path equivalent\n assert sample_file_tmp.is_file()\n\n os.remove(sample_file_tmp)\n assert not os.path.exists(sample_file_tmp)\n # pathlib.Path equivalent\n assert not sample_file_tmp.exists()", "def test_write_delete(self):\n\n expected = \"Hello, World! I'm domain2idna\"\n File(\"hi\").write(expected)\n\n with open(\"hi\") as file:\n actual = file.read()\n\n self.assertEqual(expected, actual)\n\n expected = False\n File(\"hi\").delete()\n actual = PyFunceble.path.isfile(\"hi\")\n\n self.assertEqual(expected, actual)" ]
[ "0.58008", "0.5709822", "0.5659337", "0.5595037", "0.5574128", "0.54875076", "0.5483825", "0.5405747", "0.53936034", "0.53486127", "0.53021306", "0.52604765", "0.52571845", "0.52143526", "0.52143526", "0.5193308", "0.5167983", "0.5155168", "0.51270175", "0.5124503", "0.50970846", "0.5094332", "0.5083328", "0.50674343", "0.5065151", "0.50050426", "0.49950552", "0.49881393", "0.4964329", "0.4943085", "0.49367082", "0.4930988", "0.49189433", "0.4907984", "0.49029863", "0.48907125", "0.48903057", "0.48712194", "0.48655388", "0.48249254", "0.4823648", "0.48189092", "0.481765", "0.48097923", "0.4762148", "0.47599307", "0.4757174", "0.47550765", "0.47478515", "0.47333366", "0.4732185", "0.4696987", "0.46968383", "0.46806577", "0.46805385", "0.4680258", "0.4679583", "0.46764395", "0.46667418", "0.46515098", "0.46490878", "0.46477407", "0.4647608", "0.46453443", "0.4644923", "0.4641768", "0.4638934", "0.46364978", "0.46355182", "0.4624679", "0.46236396", "0.46216926", "0.46104637", "0.45988417", "0.45930904", "0.45863643", "0.45852545", "0.4583446", "0.45820594", "0.4579744", "0.45702624", "0.45661443", "0.45658877", "0.45658845", "0.45645788", "0.45605013", "0.45524788", "0.4546937", "0.45400566", "0.45366335", "0.45357165", "0.45274028", "0.45128947", "0.45120823", "0.45003855", "0.44979298", "0.44957343", "0.44931024", "0.44892302", "0.44860494" ]
0.47389197
49
>>> import os
>>> import shutil
>>> import core.docprocessor
>>> import xml.etree.ElementTree
>>> basepath = 'core/test_output'
>>> f = open('core/test/cv_1.doc', 'r')
>>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath)
>>> cv1.result
True
>>> e = xml.etree.ElementTree.parse(os.path.join(
...     cv1.docbook_path, cv1.name.xml)).getroot()
>>> e.findall('para')[0].text
def convert(self):
    logger.info('Convert: %s' % self.base)
    if self.mimetype in ['application/msword',
                         "application/vnd.openxmlformats-officedocument"
                         ".wordprocessingml.document"]:
        if 'multipart/related' in self.stream:
            self.process_mht()
            returncode = self.convert_docfile(self.docx_path, self.name.docx,
                                              self.docbook_path, self.name.xml)
        else:
            returncode = self.convert_docfile(self.source_path, self.name,
                                              self.docbook_path, self.name.xml)
            if returncode is False:
                returncode = self.convert_docfile(self.source_path, self.name,
                                                  self.docx_path, self.name.docx)
                returncode = self.convert_docfile(self.docx_path, self.name.docx,
                                                  self.docbook_path, self.name.xml)
        if not os.path.exists(os.path.join(
                self.docbook_path, self.name.xml)):
            logger.info('Not exists')
            self.resultcode = 2
            return False
        if returncode is False:
            self.resultcode = 3
            return False
        self.remove_note()
        self.file_docbook_to_markdown()
        logger.info(' '.join([self.base.base, self.name.base, 'Success']))
        self.resultcode = 0
        return True
    else:
        logger.info('Skip')
        self.resultcode = 1
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_docx_text(path):\n document = zipfile.ZipFile(path)\n xml_content = document.read('word/document.xml')\n document.close()\n tree = XML(xml_content)\n \n paragraphs = []\n for paragraph in tree.getiterator(PARA):\n texts = [node.text\n for node in paragraph.getiterator(TEXT)\n if node.text]\n if texts:\n paragraphs.append(''.join(texts))\n \n return '\\n\\n'.join(paragraphs)", "def parse_corenlp_coref_xml_doc(input_dir = 'CoreNLP_coref_anno/dev'):\n\n\tmentions = []\n\tfor file in os.listdir(input_dir):\n\t\ttree = ET.parse(input_dir + '/' + file)\n\t\tdocument = tree.getroot()[0]\n\t\t# sentences_node = document.find('sentences')\n\n\t\t# for sentence in enumerate(sentences_node):\n\t\t# \ts_num = sentence.attribs['id']\n\t\t# \tsentence_text = \" \".join([token.word for token in sentence.find('tokens')])\n\t\t# \tsentences[s_num] = sentence_text\n\n\t\tcoref_node = document.find('coreference')\n\t\t\n\t\tfor coref_id, coref_chain in enumerate(coref_node):\n\t\t\tfor mention in cluster:\n\t\t\t\tsent_num = int(mention[0].text)\n\t\t\t\tstart = int(mention[1].text)-1\n\t\t\t\tend = int(mention[2].text)-1\n\t\t\t\ttext = mention[4].text\n\t\t\t\tmentions.append({\"filename\":file, \"s_num\":sent_num,\"EP\":\"E\", \"indices\":range(start, end),\"coref\":coref_id+1})\n\n\tmentions.sort(key=lambda x:(x[\"filename\"],x[\"s_num\"],x[\"indices\"][0]))\n\twith open('coref_output.txt', 'w') as out_file:\n\t\tout_file.write(\"file\\tsentence\\tentity(E) or predicate(P)\\t coref chain\\tindices\\t\\n\")\n\t\tout_file.write(\"\\n\".join([e[\"filename\"]+\"\\t\"+str(e[\"s_num\"])+\"\\t\"+e[\"EP\"]+\"\\t\"+str(e[\"coref\"])+\"\\t\"+str(e[\"indices\"])[1:-1] for e in mentions]))", "def __call__(self, doc):\n return doc", "def documento():\r\n\tpass", "def parseParagraphs(self, xml, index, node):\n if xml is None: return \"\"\n paragraphs = []\n for paragraph in xml.findall(\"para\"):\n terms = []\n if paragraph.text is not None: terms.append(paragraph.text)\n for child in paragraph:\n if child.tag == \"parameterlist\":\n for parameteritem in child:\n namelist = parameteritem.find(\"parameternamelist\")\n description = self.parseParagraphs(\n parameteritem.find(\"parameterdescription\"), index, node\n )\n for parametername in namelist.findall(\"parametername\"):\n for param in self.params:\n if param.name == parametername.text:\n param.direction = parametername.get(\"direction\")\n param.brief = description\n break\n elif child.tag == \"programlisting\":\n paragraphs.append(\"\".join(terms))\n paragraphs.append(child)\n terms = []\n elif child.tag == \"ulink\":\n if child.get(\"url\").startswith(\"bpdox.\"):\n cmd = child.get(\"url\")[6:]\n if cmd.startswith(\"label:\"):\n label = cmd[6:].strip()\n if node.label:\n logging.warning(\n \" cannot apply label '{new}'; overload of '{name}' \"\n \"already already labeled '{old}'\".format(\n new=label, name=\"::\".join(node.name), old=node.label\n )\n )\n node.label = label\n logging.debug(\n \" attaching label '{0}' to overload of '{1}'\".format(\n label, \"::\".join(node.name)\n )\n )\n elif cmd.strip() == \"ignore\":\n try:\n node.hide()\n logging.debug(\" ignoring overload of '{0}'\".format(\"::\".join(node.name)))\n except:\n logging.warning(\n \" bpdox.ignore invalid for '{0}'\".format(\"::\".join(node.name))\n )\n else:\n logging.warning(\n \" unrecognized @bpdox command '{0}' for '{1}'\".format(\n cmd, \"::\".join(node.name)\n )\n )\n if child.text is not None:\n terms.append(child.text)\n elif child.text is not None:\n 
terms.append(child.text)\n if child.tail is not None:\n terms.append(child.tail)\n paragraphs.append(\"\".join(terms))\n return paragraphs", "def get_doc(self):\n return self.p", "def load_doc(filename):\n\tfile = open(filename, 'r')\n\ttext = file.read()\n\tfile.close()\n\treturn text", "def main(rc):\n with store_client(rc) as sclient:\n for doc in rc.documents:\n sclient.copydoc(doc)", "def load_doc(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n return text", "def load_doc(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n return text", "def get_doc(filename :str) -> List[List[str]]:\n\tdata = []\n\ttry:\n\t\twith open(filename, 'r', encoding='utf-8') as f:\n\t\t\tcontent = f.read()\n\t\t\t# print(content)\n\t\t\tpattern = re.compile(r\"<doc.*?>(.*?)</doc>\",re.S)\n\t\t\ttexts = re.findall(pattern, content)\n\t\t\t# print(data)\n\n\t\t\tfor text in texts:\n\t\t\t\t# print(text)\n\t\t\t\ttemp = process_doc(text)\n\t\t\t\tdata.extend(temp)\n\t\t\t\t# print(len(temp))\n\n\t\t\treturn data\n\n\texcept IOError as e:\n\t\tprint(\"the file {} cannot open\".format(filename))\n\t\tprint(e)\n\t\traise IOError", "def parser(self):\n\t\tdom = ET.parse(self.input_filename)\n\t\tself.doc = dom.getroot()", "def xml2txt(filename):\n try:\n tree = et.parse(filename)\n except:\n return None\n root = tree.getroot()\n namespace = root.tag.split('}')[0]+'}'\n body = root.find(namespace+'contentSet')\\\n .find(namespace+'inlineXML')\\\n .find(namespace+'html')\\\n .find(namespace+'body')\n\n out = \"\"\n for elem in body:\n if elem.tag.split('}')[-1] == 'p':\n if elem.text:\n text = get_text(elem)\n if len(text) > 0:\n out += text.strip() + '\\n' # New paragraph (single newline)\n\n return out", "def dummy(doc):\r\n return doc", "def parseDoc(cur):\n ret = libxml2mod.xmlParseDoc(cur)\n if ret is None:raise parserError('xmlParseDoc() failed')\n return xmlDoc(_obj=ret)", "def find_document(self):\n pass", "def parse(self, doc):\n self.preprocessor.preprocess(doc)\n\n for extractor in self.extractors:\n extractor.extract(doc)\n\n return doc", "def get_grobid_xml(self, paper_id):\n\n filename=cfg.folder_pdf+paper_id+\".pdf\"\n filename_xml=cfg.folder_content_xml+paper_id+\".xml\"\n\n ## check if XML file is already available\n if os.path.isfile(filename_xml):\n ## yes, load from cache\n root=etree.parse(filename_xml)\n # check the validity of the xml\n if self.check_validity_of_xml(root):\n return root\n else:\n raise Exception(\"Error in xml, pdf either broken or not extractable (i.e Unicode mapping missing\")\n else:\n if not os.path.isfile(filename):\n raise Exception(\"PDF for \"+paper_id+\" does not exist.\")\n ## no, get from GROBID\n url = cfg.grobid_url + '/processFulltextDocument'\n params = {\n 'input': open(filename, 'rb')\n }\n response = requests.post(url, files=params)\n if response.status_code == 200:\n ## it worked. 
now parse the result to XML\n parser = etree.XMLParser(encoding='UTF-8', recover=True)\n tei = response.content\n tei = tei if not isinstance(tei, text_type) else tei.encode('utf-8')\n root = etree.fromstring(tei, parser)\n ## and store it to xml cache\n with open(filename_xml, 'wb') as f:\n f.write(etree.tostring(root, pretty_print=True))\n # Check if the xml file derived from a valid pdf with unicode mapping\n # Correct: <teiHeader xml:lang=\"en\">\n # Incorrect: <teiHeader xml:lang=\"de\">\n if self.check_validity_of_xml(root):\n return root\n else:\n raise Exception(\"Error in xml, pdf either broken or not extractable (i.e Unicode mapping missing)\")\n else:\n raise Exception(\"Error calling GROBID for \"+paper_id+\": \"+str(response.status_code)+\" \"+response.reason)", "def parseDocument(self):\n ret = libxml2mod.xmlParseDocument(self._o)\n return ret", "def readDoc(self, filename):\n try:\n doc = ET.parse( filename, parser=LineNumberingParser() )\n except self.ET_exc_class:\n raise XmlError( str(sys.exc_info()[1]) )\n\n rootnode = recurse_construct_ET_to_XmlNode( None, doc.getroot() )\n\n return rootnode", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def document(self):\n ...", "def extract_content_from_document(self, filename):\n ext = os.path.splitext(filename)[1]\n if ext == '.docx':\n with open(filename, \"rb\") as f:\n html = mammoth.convert_to_html(f).value\n text = mammoth.extract_raw_text(f).value\n return (text, html)\n else:\n # TODO: handle .doc\n raise ValueError(\"Can only handle .docx files, but got %s\" % ext)", "def parse(self, fileName):\n from lxml import etree\n \n schemadoc = etree.parse(StringIO(\"\"\"\\\n<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <!-- the runscript -->\n <xs:complexType name=\"runscriptType\">\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"machine\" type=\"machineType\"/>\n <xs:element name=\"system\" type=\"systemType\">\n <!-- setting keys have to be unique per system/version-->\n <!-- unfortunately i have found no way to create a link between settings and systems -->\n <!-- schematron should be able to do this but the lxml implementation seems to be incomplete-->\n <xs:unique name=\"settingKey\">\n <xs:selector xpath=\"setting\"/>\n <xs:field xpath=\"@name\"/>\n </xs:unique>\n </xs:element>\n <xs:element name=\"config\" type=\"configType\"/>\n <xs:element name=\"benchmark\" type=\"benchmarkType\"/>\n <xs:element name=\"pbsjob\" type=\"pbsjobType\"/>\n <xs:element name=\"condorjob\" type=\"condorjobType\"/>\n <xs:element name=\"seqjob\" type=\"seqjobType\"/>\n <xs:element name=\"project\" type=\"projectType\"/>\n </xs:choice>\n <xs:attribute name=\"output\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a project -->\n <xs:complexType name=\"projectType\">\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"runspec\" type=\"runspecType\"/>\n <xs:element name=\"runtag\" type=\"runtagType\"/>\n </xs:choice>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"job\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a machine -->\n <xs:complexType name=\"machineType\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"cpu\" type=\"xs:token\" use=\"required\"/>\n <xs:attribute name=\"memory\" type=\"xs:token\" use=\"required\"/>\n </xs:complexType>\n\n <!-- a system -->\n <xs:complexType 
name=\"systemType\">\n <xs:choice minOccurs=\"1\" maxOccurs=\"unbounded\">\n <xs:element name=\"setting\">\n <xs:complexType>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"tag\">\n <xs:simpleType>\n <xs:list itemType=\"nameType\"/>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"ppn\" type=\"xs:positiveInteger\"/>\n <xs:attribute name=\"procs\">\n <xs:simpleType>\n <xs:list itemType=\"xs:integer\"/>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"pbstemplate\" type=\"xs:string\"/>\n <xs:anyAttribute processContents=\"lax\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"version\" type=\"versionType\" use=\"required\"/>\n <xs:attribute name=\"measures\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"config\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n\n <!-- generic attributes for jobs-->\n <xs:attributeGroup name=\"jobAttr\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"timeout\" type=\"timeType\" use=\"required\"/>\n <xs:attribute name=\"runs\" type=\"xs:positiveInteger\" use=\"required\"/>\n <xs:anyAttribute processContents=\"lax\"/>\n </xs:attributeGroup>\n \n <!-- a seqjob -->\n <xs:complexType name=\"seqjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"parallel\" type=\"xs:positiveInteger\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a pbsjob -->\n <xs:complexType name=\"pbsjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"script_mode\" use=\"required\">\n <xs:simpleType>\n <xs:restriction base=\"xs:string\">\n <xs:enumeration value=\"single\"/>\n <xs:enumeration value=\"timeout\"/>\n <xs:enumeration value=\"memout\"/>\n </xs:restriction>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"walltime\" type=\"timeType\" use=\"required\"/>\n </xs:complexType>\n\n <!-- a condorjob -->\n <xs:complexType name=\"condorjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"script_mode\" use=\"required\">\n <xs:simpleType>\n <xs:restriction base=\"xs:string\">\n <xs:enumeration value=\"single\"/>\n <xs:enumeration value=\"timeout\"/>\n <xs:enumeration value=\"memout\"/>\n </xs:restriction>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"walltime\" type=\"timeType\" use=\"required\"/>\n <xs:attribute name=\"condortemplate\" type=\"xs:string\" use=\"required\"/>\n <xs:attribute name=\"basedir\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n\n\n <!-- a config -->\n <xs:complexType name=\"configType\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"template\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a benchmark -->\n <xs:complexType name=\"benchmarkType\">\n <xs:sequence minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:choice>\n <xs:element name=\"files\">\n <xs:complexType>\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"add\">\n <xs:complexType>\n <xs:attribute name=\"file\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n <xs:attribute name=\"path\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"folder\">\n <xs:complexType>\n <xs:sequence minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"ignore\">\n <xs:complexType>\n <xs:attribute name=\"prefix\" type=\"xs:string\" 
use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n <xs:attribute name=\"path\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n </xs:sequence>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- common attributes for runspec/runtag -->\n <xs:attributeGroup name=\"runAttr\">\n <xs:attribute name=\"machine\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"benchmark\" type=\"nameType\" use=\"required\"/>\n </xs:attributeGroup>\n \n <!-- a runspec -->\n <xs:complexType name=\"runspecType\">\n <xs:attribute name=\"system\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"version\" type=\"versionType\" use=\"required\"/>\n <xs:attribute name=\"setting\" type=\"nameType\" use=\"required\"/>\n <xs:attributeGroup ref=\"runAttr\"/>\n </xs:complexType>\n \n <!-- a runtag -->\n <xs:complexType name=\"runtagType\">\n <xs:attributeGroup ref=\"runAttr\"/>\n <xs:attribute name=\"tag\" type=\"tagrefType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- simple types used througout the above definitions -->\n <xs:simpleType name=\"versionType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[0-9a-zA-Z._-]+\"/>\n </xs:restriction>\n </xs:simpleType>\n\n <xs:simpleType name=\"timeType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[0-9]+(:[0-9]+(:[0-9]+)?)?\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <xs:simpleType name=\"tagrefType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"(\\*all\\*)|([A-Za-z_\\-0-9]+([ ]*[A-Za-z_\\-0-9]+)*)([ ]*\\|[ ]*([A-Za-z_\\-0-9]+([ ]*[A-Za-z_\\-0-9]+)*))*\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <xs:simpleType name=\"nameType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[A-Za-z_\\-0-9]*\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <!-- the root element -->\n <xs:element name=\"runscript\" type=\"runscriptType\">\n <!-- machine keys -->\n <xs:keyref name=\"machineRef\" refer=\"machineKey\">\n <xs:selector xpath=\"project/runspec|project/runall\"/>\n <xs:field xpath=\"@machine\"/>\n </xs:keyref>\n <xs:key name=\"machineKey\">\n <xs:selector xpath=\"machine\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- benchmark keys -->\n <xs:keyref name=\"benchmarkRef\" refer=\"benchmarkKey\">\n <xs:selector xpath=\"project/runspec|project/runall\"/>\n <xs:field xpath=\"@benchmark\"/>\n </xs:keyref>\n <xs:key name=\"benchmarkKey\">\n <xs:selector xpath=\"benchmark\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- system keys -->\n <xs:keyref name=\"systemRef\" refer=\"systemKey\">\n <xs:selector xpath=\"project/runspec\"/>\n <xs:field xpath=\"@system\"/>\n <xs:field xpath=\"@version\"/>\n </xs:keyref>\n <xs:key name=\"systemKey\">\n <xs:selector xpath=\"system\"/>\n <xs:field xpath=\"@name\"/>\n <xs:field xpath=\"@version\"/>\n </xs:key>\n <!-- config keys -->\n <xs:keyref name=\"configRef\" refer=\"configKey\">\n <xs:selector xpath=\"system\"/>\n <xs:field xpath=\"@config\"/>\n </xs:keyref>\n <xs:key name=\"configKey\">\n <xs:selector xpath=\"config\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- config keys -->\n <xs:keyref name=\"jobRef\" refer=\"jobKey\">\n <xs:selector xpath=\"project\"/>\n <xs:field xpath=\"@job\"/>\n </xs:keyref>\n <xs:key name=\"jobKey\">\n <xs:selector xpath=\"seqjob|pbsjob|condorjob\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- project keys -->\n <xs:unique name=\"projectKey\">\n <xs:selector xpath=\"project\"/>\n <xs:field 
xpath=\"@name\"/>\n </xs:unique>\n </xs:element>\n</xs:schema>\n\"\"\"))\n schema = etree.XMLSchema(schemadoc)\n\n doc = etree.parse(open(fileName))\n schema.assertValid(doc)\n \n root = doc.getroot()\n run = Runscript(root.get(\"output\"))\n\n for node in root.xpath(\"./pbsjob\"):\n attr = self._filterAttr(node, [\"name\", \"memout\", \"timeout\", \"runs\", \"ppn\", \"procs\", \"script_mode\", \"walltime\"])\n job = PbsJob(node.get(\"name\"), node.get(\"memout\"), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), node.get(\"script_mode\"), tools.xmlTime(node.get(\"walltime\")), attr)\n run.addJob(job)\n\n for node in root.xpath(\"./condorjob\"):\n attr = self._filterAttr(node, [\"name\", \"memout\", \"timeout\", \"runs\", \"ppn\", \"procs\", \"script_mode\", \"walltime\"])\n job = CondorJob(node.get(\"name\"), tools.xmlTime(node.get(\"memout\")), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), node.get(\"script_mode\"), tools.xmlTime(node.get(\"walltime\")), node.get(\"condortemplate\"),node.get(\"basedir\"), attr)\n run.addJob(job)\n\n for node in root.xpath(\"./seqjob\"):\n attr = self._filterAttr(node, [\"name\", \"timeout\", \"runs\", \"parallel\"])\n job = SeqJob(node.get(\"name\"), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), int(node.get(\"parallel\")), attr)\n run.addJob(job)\n \n for node in root.xpath(\"./machine\"):\n machine = Machine(node.get(\"name\"), node.get(\"cpu\"), node.get(\"memory\"))\n run.addMachine(machine)\n\n for node in root.xpath(\"./config\"):\n config = Config(node.get(\"name\"), node.get(\"template\"))\n run.addConfig(config)\n \n compoundSettings = {}\n sytemOrder = 0 \n for node in root.xpath(\"./system\"):\n system = System(node.get(\"name\"), node.get(\"version\"), node.get(\"measures\"), sytemOrder)\n settingOrder = 0\n for child in node.xpath(\"setting\"):\n attr = self._filterAttr(child, [\"name\", \"cmdline\", \"tag\"])\n compoundSettings[child.get(\"name\")] = []\n if \"procs\" in attr:\n procs = [int(proc) for proc in attr[\"procs\"].split(None)]\n del attr[\"procs\"]\n else: procs = [None]\n if \"ppn\" in attr: \n ppn = int(attr[\"ppn\"])\n del attr[\"ppn\"]\n else: ppn = None\n if \"pbstemplate\" in attr:\n pbstemplate = attr[\"pbstemplate\"]\n del attr[\"pbstemplate\"]\n else: pbstemplate = None\n if child.get(\"tag\") == None: tag = set()\n else: tag = set(child.get(\"tag\").split(None))\n for num in procs:\n name = child.get(\"name\")\n if num != None: \n name += \"-n{0}\".format(num)\n compoundSettings[child.get(\"name\")].append(name)\n setting = Setting(name, child.get(\"cmdline\"), tag, settingOrder, num, ppn, pbstemplate, attr)\n system.addSetting(setting)\n settingOrder += 1\n\n run.addSystem(system, node.get(\"config\"))\n sytemOrder += 1\n \n for node in root.xpath(\"./benchmark\"):\n benchmark = Benchmark(node.get(\"name\"))\n for child in node.xpath(\"./folder\"):\n element = Benchmark.Folder(child.get(\"path\"))\n for grandchild in child.xpath(\"./ignore\"):\n element.addIgnore(grandchild.get(\"prefix\"))\n benchmark.addElement(element)\n for child in node.xpath(\"./files\"):\n element = Benchmark.Files(child.get(\"path\"))\n for grandchild in child.xpath(\"./add\"):\n element.addFile(grandchild.get(\"file\"))\n benchmark.addElement(element)\n run.addBenchmark(benchmark)\n \n for node in root.xpath(\"./project\"):\n project = Project(node.get(\"name\"))\n run.addProject(project, node.get(\"job\"))\n for child in node.xpath(\"./runspec\"):\n for setting in 
compoundSettings[child.get(\"setting\")]: \n project.addRunspec(child.get(\"machine\"),\n child.get(\"system\"),\n child.get(\"version\"),\n setting,\n child.get(\"benchmark\"))\n \n for child in node.xpath(\"./runtag\"):\n project.addRuntag(child.get(\"machine\"), \n child.get(\"benchmark\"),\n child.get(\"tag\"))\n \n return run", "def get_doc(self) -> str:\n if self.soup is not None:\n root = self.soup.contents[0]\n body = self.get_paragraph(root.find(\"abstract\", recursive=False))\n body += self.get_paragraph(root.find(\"discussion\", recursive=False))\n return body\n\n return self.doc", "def test() -> None:\n docx2python(\"resources/example.docx\")", "def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc", "def read_xml(xmlptf):\n \n # read document to workspace\n with open(xmlptf, 'r') as ff:\n temp = ff.read()\n ff.close()\n\n # make it into a soup object\n out = BeautifulSoup(temp, 'xml')\n \n return out", "def docx():\n env.file_ext = \".docx\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} --bibliography={bib_file} --csl={csl_file} --toc\".format(**env))", "def xml_to_conll(self, xml_file_path):\n\n if not os.path.exists(CONLL_PATH):\n self.create_directories(CONLL_PATH)\n\n\n for file in os.listdir(xml_file_path):\n\n # Set path to file\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open Files\n chapter_input = open(file, 'r', encoding='utf8')\n\n # Create Same Filename in Output Folder\n chapter_output = open(CONLL_PATH+os.path.split(file)[-1]+'.conll', 'w', encoding='utf8')\n\n print('Converting: ' + chapter_input.name + ' to Conll09 file: ' + chapter_output.name)\n\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n for sentence in chapter_input.find_all('s'):\n line_id = 0\n for terminal in sentence.find_all('t'):\n 
line_id, terminal_id, form, lemma, plemma = line_id+1, terminal.get('id'), terminal.get('word'), terminal.get('lemma'), terminal.get('lemma')\n pos, ppos = terminal.get('pos'), terminal.get('pos')\n feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1 = \"_\" * 9 # <3 Python!\n chapter_output.write(\"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\"\n \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\n\"\n % (str(line_id)+\"-\"+terminal_id, form, lemma, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1))\n chapter_output.write(\"\\n\")\n\n chapter_output.close()\n\n print(\"Done!\")", "def fini_doc(self):\n raise NotImplementedError()", "def core_document_parser(session, filename, options):\n vp = CoreVersionParser(filename, options)\n if 'dom' not in options:\n options['dom'] = vp.dom\n if vp.version == '0.0':\n doc = CoreDocumentParser0(session, filename, options)\n elif vp.version == '1.0':\n doc = CoreDocumentParser1(session, filename, options)\n else:\n raise ValueError('unsupported document version: %s' % vp.version)\n return doc", "def doc(self):\n return \"\\n\".join(self.docLines)", "def processed_doc(self, pipeline):\n return [pipeline(text) for text in EN_DOCS]", "def newDocPI(self, name, content):\n ret = libxml2mod.xmlNewDocPI(self._o, name, content)\n if ret is None:raise treeError('xmlNewDocPI() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp", "def doc(self):\n ret = libxml2mod.xmlParserGetDoc(self._o)\n if ret is None:raise parserError('xmlParserGetDoc() failed')\n __tmp = xmlDoc(_obj=ret)\n return __tmp", "def parse_design(self, detailed_design_file):", "def analyse_document(dom, arguments):\n model = dom.getElementsByTagName(\"model\")[0]\n return analyse_model(model, arguments)", "def doc(self):\n\n if not hasattr(self, \"_doc\"):\n self.loader.cdr_cursor.execute(self.SELECT_XML, self.id)\n xml = self.loader.cdr_cursor.fetchone().xml\n self._doc = etree.fromstring(xml.encode(\"utf-8\"))\n return self._doc", "def extraire_contenu_du_document(root):\n contenu = ''\n for element in root.findall('.//BLOC_TEXTUEL/CONTENU'):\n contenu = contenu.join(element.itertext())\n return contenu", "def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc", "def recoverDoc(cur):\n ret = libxml2mod.xmlRecoverDoc(cur)\n if ret is None:raise treeError('xmlRecoverDoc() failed')\n return xmlDoc(_obj=ret)", "def doc(self):\n try:\n return self.definition.doc\n except AttributeError:\n return self.raw_doc", "def preprocess(doc_in, doc_out):\n def output(text, doc_id):\n doc_out.write(doc_id + \"\\n\")\n doc_out.write(text.replace(\"\\n\", \" \") + \"\\n\\n\")\n\n def filter_text(t):\n filtered_out = [\"<P>\", \"</P>\"]\n r = t\n for f in filtered_out:\n r = r.replace(f, \" \")\n return r\n\n\n doc_id = None\n reading_text = False\n text = \"\"\n for line in doc_in:\n if(str_text_start in line):\n if(reading_text):\n warning(\"Found \" + str_text_start + \" in text\")\n if(not doc_id):\n warning(\"Reading text without knowing id\")\n continue\n 
reading_text = True\n continue\n if((str_text_stop in line) and reading_text):\n output(text, doc_id)\n text = \"\"\n reading_text = False\n doc_id = None\n doc_id_match = pat_doc_no.match(line)\n if(doc_id_match):\n doc_id = doc_id_match.group(1)\n if(reading_text):\n warning(\"Found doc id in text\")\n continue\n if(reading_text):\n text = text + filter_text(line)", "def process_xml(self):\n self.process_gpx_file(str(self.filename))", "def process(self, doc):\n raise multisearch.errors.FeatureNotAvailableError", "def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale (2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" 
scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" 
name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" 
</source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn", "def get_full_text_from_source(self):\n extension = self.get_doc_file_extension()\n\n if extension in ('txt', ''):\n # string = unicode(string)\n return self.doc_file.read().decode(\"utf-8\")\n elif extension == 'docx':\n docx_document = Docx(BytesIO(self.doc_file.read()))\n return \"\\n\".join(p.text for p in docx_document.paragraphs)\n elif extension == 'pdf':\n raise NotImplementedError()\n else:\n raise ValueError(\"file_format not supported\")", "def get_doc(self):\n return self._doc", "def process_doc(self, doc):\n\n # Ignore empty documents\n if not doc or not doc.get('Tests'):\n return\n\n # Clear datatypes and params from previous documents\n gt.datatypes.clear()\n gt.param.clear()\n\n # Return dictionary of all known datatypes\n gt.datatypes.update(gt.get_datatypes(doc))\n\n # Arguments structure corresponding to C/C++ structure\n gt.param['Arguments'] = type('Arguments', (gt.ctypes.Structure,),\n {'_fields_': gt.get_arguments(doc)})\n\n # Special names which get expanded as lists of arguments\n gt.param['dict_lists_to_expand'] = doc.get('Dictionary lists to expand') or ()\n\n # Lists which are not expanded\n gt.param['lists_to_not_expand'] = doc.get('Lists to not expand') or ()\n\n # Defaults\n defaults = doc.get('Defaults') or {}\n\n default_add_ons = {\"m\": 1, \"M\": 1, \"n\": 1, \"N\": 1, \"k\": 1, \"K\": 1, 
\"lda\": 1, \"ldb\": 1, \"ldc\": 1, \"LDA\": 1, \"LDB\": 1, \"LDC\": 1, \"iters\": 1, \"flops\": '', \"mem\": '', \"samples\": 1, \"step_mult\": 0}\n defaults.update(default_add_ons)\n\n # Known Bugs\n gt.param['known_bugs'] = doc.get('Known bugs') or []\n\n # Functions\n gt.param['Functions'] = doc.get('Functions') or {}\n\n # Instantiate all of the tests, starting with defaults\n for test in doc['Tests']:\n case = defaults.copy()\n case.update(test)\n gt.generate(case, gt.instantiate)", "def parsexml(self):\n raise NotImplementedError", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def test_findDocumentation(self):\n doc = self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def test_XmlDumpFirstRev(self):\n pages = get_entries('article-pear.xml', allrevisions=False)\n self.assertLength(pages, 1)\n self.assertEqual('Automated conversion', pages[0].comment)\n self.assertEqual('Pear', pages[0].title)\n self.assertEqual('24278', pages[0].id)\n self.assertTrue(pages[0].text.startswith('Pears are [[tree]]s of'))\n self.assertTrue(not pages[0].isredirect)", "def parse_post_describeprocess(doc):\n\n version = doc.attrib.get('version')\n wpsrequest.check_and_set_version(version)\n\n language = doc.attrib.get('language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.operation = 'describeprocess'\n wpsrequest.identifiers = [identifier_el.text for identifier_el in\n xpath_ns(doc, './ows:Identifier')]", "def save_annotated_text_to_xml(self):\n #initialise file to write the output\n outfile = open(('annotated_text_' + self.lang + '_' + \n self.method + '.xml'), 'w')\n #initialise xml\n annotated_doc = etree.Element('Annotated_document')\n main_text = ''\n #counter for the sentences\n counter_sentence = 0\n #counter for the paragraphs\n counter_paragraph = 0\n #open txt file\n with open(self.lang + '.txt') as file:\n for paragraph in file:\n paragraph_string = ''\n sentences = tokenize.sent_tokenize(paragraph)\n for sentence in sentences:\n #build lists with the ends of the tokens with NE and the NEs\n end_list = [0]\n end_list += [i[2] for i in \n self.named_entity_list_total[counter_sentence]]\n ne_list = [i[3] for i in \n self.named_entity_list_total[counter_sentence]]\n counter_sentence += 1\n #build new string\n new_string = ''\n for i in range(len(end_list)-1):\n new_string += (sentence[end_list[i]:end_list[i+1]]+\n '<annotation class=\"'+ne_list[i]+'\"/>')\n new_string += sentence[end_list[-1]:len(sentence)]\n paragraph_string += new_string+'\\n'\n #print title, author, abstract and main text differently to xml\n if counter_paragraph == 0:\n title_text = etree.SubElement(annotated_doc, \"Title\")\n #add text to the node\n init_text = \"<text>{0}</text>\".format(paragraph_string[6:])\n fin_text = etree.fromstring(init_text)\n title_text.append(fin_text)\n elif counter_paragraph == 1:\n author_text = etree.SubElement(annotated_doc, \"Author\")\n #add text to the node\n init_text = \"<text>{0}</text>\".format(paragraph_string[7:])\n fin_text = etree.fromstring(init_text)\n author_text.append(fin_text)\n elif counter_paragraph == 2:\n abstract_text = etree.SubElement(annotated_doc, \"Abstract\")\n #add text to the node\n init_text = \"<text>{0}</text>\".format(paragraph_string[9:])\n fin_text = etree.fromstring(init_text)\n abstract_text.append(fin_text)\n else: \n main_text += paragraph_string\n counter_paragraph += 1\n main_text_xml = 
etree.SubElement(annotated_doc, \"Main_text\")\n #add text to the node\n init_text = \"<text>{0}</text>\".format(main_text)\n fin_text = etree.fromstring(init_text)\n main_text_xml.append(fin_text)\n #convert and write to outfile\n xml_bytes = etree.tostring(annotated_doc, encoding='UTF-8', \n pretty_print=True, xml_declaration=True)\n xml_str = xml_bytes.decode(\"utf-8\")\n outfile.write(xml_str)\n outfile.close()\n return", "def parseDocument(self, lines):\r\n # Create a ElementTree from the lines\r\n self.root = util.etree.Element(self.markdown.doc_tag)\r\n self.parseChunk(self.root, '\\n'.join(lines))\r\n return util.etree.ElementTree(self.root)", "def digital_text(file_path):\n doc = fitz.open(file_path)\n page_count = doc.pageCount\n print(\"\\n number of pages : \",page_count)\n total_text = \"\"\n try:\n for page_num in range(page_count):\n p = doc.loadPage(page_num)\n page_text = p.getText()\n total_text += page_text\n print(\"\\n number of pages extracted : \", (page_count))\n except Exception as e:\n print(\"\\n Error in digital_text : \", traceback.format_exc(()))\n return total_text", "def test_doc():\n pass", "def CurrentDoc(self):\n ret = libxml2mod.xmlTextReaderCurrentDoc(self._o)\n if ret is None:raise treeError('xmlTextReaderCurrentDoc() failed')\n __tmp = xmlDoc(_obj=ret)\n return __tmp", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n # sprint(PrettyFormatAny.form(self.m_root_xml, 'A3-01-A - Entire Xml'))\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n # print(PrettyFormatAny.form(self.m_xml.controller_sect, 'A3-01-B - All Controllers Xml'))\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')\n # print(PrettyFormatAny.form(self.m_xml.controller, 'A3-01-C - First Controller Xml'))", "def parse(filename):\n\n tree = etree.parse(filename)\n root = tree.getroot()\n # according to the structure of the xml article meta nested under \n # front then article-meta\n articleMeta = root[0][1]\n # pubmed central article id\n pmcId = ''\n # the author list, the list of names excluding corresponding\n # athor\n otherAuthors = []\n # the name and email of the corresponding authors\n cAuthors = []\n # container for all the author groups\n authorGroups = []\n \n for child in articleMeta:\n # find the pmc id\n if ((child.tag == 'article-id') and not(isEmpty(child.attrib))):\n if (child.attrib['pub-id-type'] == 'pmc'):\n pmcId = child.text\n # find the author group\n elif (child.tag == 'contrib-group'):\n authorGroups.append(child)\n # this child may contain important corresponding information\n elif (child.tag == 'author-notes'):\n authorNotes = child\n # find the publication date\n elif (child.tag == 'history'):\n for theDate in child:\n if ('date-type' in theDate.attrib and theDate.attrib['date-type'] == 'accepted'):\n #publiction date YEAR MONTH DAY\n if (theDate.find('year') != None):\n theYear = theDate.find('year').text\n else:\n theYear = 0\t\n if (theDate.find('month') != None):\n theMonth = theDate.find('month').text\n else:\n theMonth = 6\n if (theDate.find('day') != None):\n theDay = theDate.find('day').text\n else:\n theDay = 1\n\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n elif (child.tag == 'pub-date'): \n if ('pub-type' in child.attrib and (child.attrib['pub-type'] == 'ppub' or child.attrib['pub-type'] == 'epub')):\n #for grandchild in child: 
print(grandchild.tag)\n \n if (child.find('year') != None):\n theYear = child.find('year').text\n else:\n theYear = 0\n \n if (child.find('month') != None):\n theMonth = child.find('month').text\n else:\n theMonth = 6\n \n if (child.find('day') != None):\n theDay = child.find('day').text\n else:\n theDay = 1\t\t\t\t\t\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n case1 = False # will be used for post-processing, corr author identified but no email\n for authorGroup in authorGroups:\n # parse author group information\n for child in authorGroup:\n if (child.tag == 'contrib' and child.attrib['contrib-type'] == 'author'):\n # the first child is the name tag\n try:\n name = child[0].find('given-names').text + ' ' + child[0].find('surname').text\n except:\n return((-1,))\n if ('corresp' in child.attrib): # and child.attrib['corresp'] == 'yes'):\n # if it a corresponding author\n # check to see if there is email field\n if (len(child) > 2 and child[1].find('email') != None):\n data = (name, child[1].find('email').text)\n cAuthors.append(data)\n #else post-process this case: case(1)\n else:\n data = (name, 'null')\n cAuthors.append(data)\n case1 = True\n else: \n # handle EMBO style xml \n xrefList = findInSubtree(child, 'xref')\n if (len(xrefList) > 0):\n for xref in xrefList:\n if ('ref-type' in xref.attrib and xref.attrib['ref-type'] == 'corresp'):\n # this is an corresponding author\n data = (name, '')\n cAuthors.append(data)\n case1 = True\n if (case1 == False):\n otherAuthors.append(name) \n else:\n # if not a corresponding author\n otherAuthors.append(name)\n\n # not done yet, some corresponding author information are embedded in author-notes\n if (case1 and 'authorNotes' in locals()):\n i = 0\n # corresponding author identified but no email found\n for child in authorNotes:\n if (child.tag == 'corresp'):\n for grandchild in child:\n if (grandchild.tag == 'email'):\n if (i == len(cAuthors)): break\t\n cAuthors[i] = (cAuthors[i][0], grandchild.text)\n i = i + 1\n elif ('authorNotes' in locals()):\n # the linking information is embedded entirely in the text\n text = etree.tostring(authorNotes).strip().decode('utf-8')\n emailElements = findInSubtree(authorNotes, 'email')\n for name in otherAuthors:\n j = 0\n if (text.find(name) != -1 and j < len(emailElements)):\n data = (name, emailElements[j].text)\n cAuthors.append(data)\n otherAuthors.remove(name)\n j = j + 1\n\n # sanity check here, reject anything that may corrupt the database\n if ('pmcId' in locals() and 'publicationDate' in locals()):\n try:\n print(pmcId, otherAuthors, cAuthors, publicationDate)\n except:\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n else:\n return((-1,))", "def load_doc(filename):\n # open the file as read only.\n with open(filename, 'r') as fp:\n return fp.read()", "def docs():", "def readDoc(cur, URL, encoding, options):\n ret = libxml2mod.xmlReadDoc(cur, URL, encoding, options)\n if ret is None:raise treeError('xmlReadDoc() failed')\n return xmlDoc(_obj=ret)", "def parse_file(self, filepath):\n\n xml_file = open(filepath, \"r\")\n xml = xml_file.read()\n content = \"\"\n\n xml_file.close()\n\n for line in xml.replace(\"&amp;\", \"&\").split(\"\\n\"):\n if content != \"\":\n content += \" \"\n content += re.sub(\"(<(P|F).*?>)|(<\\\\/P>)\", \"\", line).strip()\n # XML cleanning\n\n start_offset = \"<START_OFFSET_DUCFileRep>\"\n content = start_offset 
+ content\n content = content.replace(\"</LP>\", \"</LP>%s\"%start_offset)\n content = content.replace(\"</TEXT>\", \"</TEXT>%s\"%start_offset)\n content = re.sub(\"%s.*?<LP>(.*?)<\\\\/LP>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*?<TEXT>(.*?)<\\\\/TEXT>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*\"%start_offset, \"\", content)\n\n self.set_content(content)", "def get_docs_and_page():\n _, *args = sys.argv[:]\n if len(args) > 0:\n print(pydoc.getdoc(*args))\n return pydoc.getdoc(*args)", "def build_document(self):\n pass", "def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def example_xml_file43():\n return load_xml('datacite-v4.3-full-example.xml')", "def test_doc_representation(self):\n doc_str = \"# This is a comment\\n* One\\nText text\"\n doc = parser.parse(doc_str)\n\n self.assertEqual(str(doc), doc_str)\n\n doc_str = \"Text\\nMore text\\n\\n\\nSome empty lines and text\\n* HL\"\n doc = parser.parse(doc_str)\n\n self.assertEqual(str(doc), doc_str)\n\n doc_str = \"- List one\\n + Slist one\\n + Slist two\"\n doc = parser.parse(doc_str)\n\n self.assertEqual(str(doc), doc_str)", "def extract_paragraph_test(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n 
return_documenTM = False, cut_off = True, unit_dict = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc(parser = 'cde_parser')\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_test(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def example_xml_file41():\n return load_xml('datacite-v4.1-full-example.xml')", "def parseword(intext): # type: (str) -> str\n\n wordbinarydata = base64.b64decode(intext.strip())\n wordFileObj = io.BytesIO()\n wordFileObj.write(wordbinarydata)\n theword = docx.Document(wordFileObj)\n extractedText = ''\n for para in theword.paragraphs:\n extractedText = extractedText + para.text + '\\n'\n\n return extractedText", "def latex2wp():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_path')\n #parser.add_argument('destination')\n args = parser.parse_args()\n\n source = None\n blocks = None\n paragraphs = []\n\n source = from_file(args.input_path)\n source = document(source)\n source = split_inline(source)\n blocks = split_blocks(source)\n fragments = process(blocks)\n\n html = refs('\\n'.join(fragments))\n print(html)", "def testMakeDocument(self):\n\n # I've split the wanted result string up into substrings so I can\n # amend it more easily (or so I hope).\n trivial_package = \"\"\"\\\n<document source=\"Package trivial_package\">\n <section class=\"package\" id=\"package-trivial-package\" name=\"package trivial_package\">\n <title>\n Package trivial_package\\n\"\"\"\n\n # The \"xml:space\" attribute is by observation, not prediction\n module_init = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-init\" name=\"module trivial_package.__init__\">\n <title>\n Module trivial_package.__init__\n 
<literal_block class=\"docstring\" xml:space=\"preserve\">\n A simple docstring.\\n\"\"\"\n\n module_file1 = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-file1\" name=\"module trivial_package.file1\">\n <title>\n Module trivial_package.file1\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n This is the first example file. It *does* use reStructuredText.\n <section class=\"class\" id=\"class-trivial-package-file1-fred\" name=\"class trivial_package.file1.fred\">\n <title>\n Class trivial_package.file1.Fred\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n An example class - it announces each instance as it is created.\\n\"\"\"\n\n module_file2 = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-file2\" name=\"module trivial_package.file2\">\n <title>\n Module trivial_package.file2\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n This module is *not* using reStructuredText for its docstrings.\\n\"\"\"\n\n non_python_file = \"\"\"\\\n <section class=\"file\" id=\"file-trivial-package-not-python\" name=\"file trivial_package.not_python\">\n <title>\n File trivial_package.not_python\n <paragraph>\n File \n <literal>\n not_python\n is not a Python module.\\n\"\"\"\n\n sub_package = \"\"\"\\\n <section class=\"package\" id=\"package-trivial-package-sub-package\" name=\"package trivial_package.sub_package\">\n <title>\n Package trivial_package.sub_package\\n\"\"\"\n\n sub_module_init = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-sub-package-init\" name=\"module trivial_package.sub_package.__init__\">\n <title>\n Module trivial_package.sub_package.__init__\\n\"\"\"\n\n wanted_result = (trivial_package + module_init + module_file1 +\n module_file2 + non_python_file + sub_package +\n sub_module_init)\n\n tree = parse_package(\"trivial_package\")\n\n document = make_document(tree)\n\n actual_result = document.pformat()\n\n if wanted_result != actual_result:\n print \"+++++++++++++++++++++++++ WANT\"\n print wanted_result\n print \"+++++++++++++++++++++++++ GOT\"\n print actual_result\n print \"+++++++++++++++++++++++++\"\n\n self.assertEqual(actual_result,wanted_result)", "def process(self, doc):\n if not type(doc) is xmldoc.XMLDoc:\n raise ValueError(\"doc must be a XMLDoc\")\n\n pyIdlak_txp.PyIdlakModule_process(self._mod, doc.idlak_doc)", "def getDoc(self):\r\n return self.__doc__", "def process(self, doc, is_):\n implementation = Implementation(self)\n implementation.process(doc, is_)\n return implementation.html", "def getRtf(self):\n self.pieces = []\n for node in self.root.findall(\"MiscellaneousDocumentText\"):\n for child in node:\n if child.tag == \"Para\":\n self.__addPara(child)\n elif child.tag in (\"ItemizedList\", \"OrderedList\"):\n self.__addList(child, child.tag)\n return \"\".join(self.pieces)", "def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)", "def get_para_data(output_doc_name, paragraph):\n output_para = output_doc_name.add_paragraph()\n for run in paragraph.runs:\n output_run = output_para.add_run(run.text)\n output_run.font.size=run.font.size\n # Run's bold data\n output_run.bold = run.bold\n # Run's italic data\n output_run.italic = run.italic\n # Run's underline data\n output_run.underline = run.underline\n # Run's color data\n output_run.font.color.rgb = run.font.color.rgb\n # Run's font data\n output_run.style.name = run.style.name\n # Paragraph's alignment data\n output_para.paragraph_format.alignment = 
paragraph.paragraph_format.alignment", "def test_document_usage(self):\n fname = '10.1039_C6OB02074G.html'\n f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'rsc', fname), 'rb')\n d = Document.from_file(f, readers=[RscHtmlReader()])\n self.assertEqual(len(d.elements), 60)", "def getDocument(self, *args):\n return _libsbml.SBMLConverter_getDocument(self, *args)", "def process():\n reader = owslib.wps.WPSDescribeProcessReader()\n root = reader.readFromString(open(resource_file(\"process_description.xml\")).read())\n xml = root.findall(\"ProcessDescription\")[0]\n return owslib.wps.Process(xml)", "def get(self, docid):\n file = os.path.join(self.dirname, docid)\n with open(file,'r',encoding='utf-8') as f:\n text = f.read()\n return text", "def extract_sentences(paper_path, para_yes):\n\n f = open(paper_path, 'rb')\n doc = Document.from_file(f, readers=[HtmlReader()])\n\n sen_yes_arr = list()\n sen_no_arr = list()\n\n elem_all = np.arange(0,len(doc))\n para_no = np.delete(elem_all, para_yes)\n\n for i in para_no:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_no_arr.append(sentence)\n\n for i in para_yes:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_yes_arr.append(sentence)\n\n\n return sen_yes_arr, sen_no_arr", "def get_article(doi, output='txt'):\n xml = download_article(doi)\n if xml is None:\n return None\n et = ET.fromstring(xml)\n full_text = et.find('article:originalText', elsevier_ns)\n if full_text is None:\n logging.info('Could not find full text for %s.' % doi)\n return None\n main_body = full_text.find('xocs:doc/xocs:serial-item/ja:article/ja:body',\n elsevier_ns)\n if main_body is None:\n return None\n if output == 'xml':\n return main_body\n elif output == 'txt':\n sections = main_body.findall('common:sections/common:section',\n elsevier_ns)\n full_txt = ''\n for s in sections:\n # Paragraphs that are directly under the section\n pars = s.findall('common:para', elsevier_ns)\n # Paragraphs that are under a section within the section\n pars += s.findall('common:section/common:para', elsevier_ns)\n for p in pars:\n # Get the initial string inside the paragraph\n if p.text is not None:\n full_txt += p.text\n # When there are tags inside the paragraph (for instance\n # references), we need to take those child elements one by one\n # and get the corresponding tail strings and join these. \n full_txt += ''.join([c.tail if c.tail is not None \n else '' for c in p.getchildren()])\n full_txt += '\\n'\n else:\n logging.error('Unknown output format %s.' 
% output)\n return None\n return full_txt", "def process_wiki_file(args: Tuple[str, str, int]) -> str:\n filepath, language, min_sent_word_count = args\n with bz2.open(filepath, \"rt\", encoding=\"utf8\") as bz2_file:\n\n # Extract text between <doc> xml tags\n soup = BeautifulSoup(bz2_file.read(), \"lxml\")\n docs = soup.find_all(\"doc\")\n wiki_dump_content = \"\"\n for i, doc in enumerate(docs):\n processed_text = process_wiki_doc_text(\n doc.text, language, min_sent_word_count\n )\n if len(processed_text) == 0:\n continue\n\n # Append to result\n if i > 0 and len(wiki_dump_content) > 0:\n wiki_dump_content += \"\\n\"\n wiki_dump_content += processed_text\n\n return wiki_dump_content", "def has_doc() -> None:", "def get_doc_prov(j, gcis_url, refList):\n gcis_ns = \"https://gcis-search-stage.jpl.net:3000/gcis.owl#\"\n doc = ProvEsDocument()\n bndl = None\n \n#to get people attributed to, you need to grab article -> jornal_identifier -> look up in references\n# for ref in refList:\n# if ref['child_publication'] == j['uri']:\n \n\n\n doc_attrs = [\n (\"prov:type\", 'gcis:Article'),\n (\"prov:label\", j['title']),\n (\"prov:location\", j['uri']),\n #(\"prov:wasAttributedTo\", j['']),\n ]\n doc.entity('bibo:%s' % j['identifier'], doc_attrs)\n\n prov_json = json.loads(doc.serialize())\n\n return prov_json", "def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects", "def example_xml_file():\n return load_xml('datacite-v3.1-full-example.xml')", "def test_single_document_processing(self):\n print('submitting document...')\n\n for doc in self.DOCS:\n result = self.client.submit_document(doc)\n\n from pprint import pprint\n print(result)\n self.assertTrue(result != \"\")", "def read(self, content: str):\n documents = []\n # 1. 
Split the text in documents using string '-DOCSTART- -X- O O' and loop over it\n content = content.split('-DOCSTART- -X- O O')\n for doc in content:\n if doc != '':\n words = []\n sentences = []\n labels = []\n start = 0\n # 2. Split lines and loop over\n str_sentences = doc.split('\\n\\n')\n # 3. Make vectors of tokens and labels (colunn 4) and at the '\\n\\n' make a sentence\n for sentence in str_sentences:\n if sentence != '':\n tokens = sentence.split('\\n')\n for token in tokens:\n if ' ' in token :\n cols = token.split(' ')\n words.append(cols[0])\n labels.append(cols[1])\n sentences.append(Sentence(doc, start, start+len(tokens)))\n start += len(tokens)\n # 4. Create a Document object\n documents.append(Document.create_from_vectors(words, sentences, labels))\n\n return documents", "def read_doc(self):\n self.data_read[:] = []\n for para in self.document.paragraphs:\n text = para.text\n # skip blank lines\n if text.strip():\n # remove duplicated spaces\n text = ' '.join(text.split())\n # for older versions of final CAPA's\n self.fill_project_info(text, new_format=False)\n self.data_read.append(text)\n\n # Constant in old & new report format\n # Batch/Project name\n # Lead(s)'s name\n # Reported date\n for i in range(0, len(self.data_read)):\n if next((x for x in self.leads if x in self.data_read[i]), None):\n self.project_info.update({'Project Name': self.data_read[i - 1]})\n self.project_info.update({'Lead(s)': self.data_read[i]})\n self.project_info.update({'Date Reported': self.data_read[i + 1]})\n break", "def main(src_xml, out='output.txt', sep='::'):\n \n soup = bs(open(src_xml).read(), 'lxml') \n data = [(p[0].text, s_convert(p[1].text), p[2].text)\n for p in zip(soup('weightedfrequency'),\n soup('headword'),\n soup('shortdefinition'))]\n res = ''\n words = []\n for item in data:\n try:\n if item[1].replace('\\n', '') in words:\n continue\n else:\n res += '{}{}{}{}{}\\n'.format(item[0],\n sep,\n item[1].replace('\\n', ''),\n sep,\n item[2].replace('\\n', ''))\n words.append(item[1].replace('\\n', ''))\n except:\n continue\n with open(out, 'w') as f:\n f.write(res)\n return res", "def xml2html(self):\n handler = open(self.xml_doc).read()\n soup = BeautifulSoup(handler, 'xml')\n\n fw = open(self.filename_out, 'w')\n\n fw.write(\"<!DOCTYPE html>\" + os.linesep)\n fw.write(\"<html>\" + os.linesep)\n fw.write(\"<head>\" + os.linesep)\n fw.write('<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">' + os.linesep)\n fw.write(\"<link rel=\\\"stylesheet\\\" href=\\\"%s\\\" type=\\\"text/css\\\" />\" % self.stylesheet_name + os.linesep)\n fw.write(\"<title></title>\" + os.linesep)\n fw.write(\"</head>\" + os.linesep)\n fw.write(\"<body>\" + os.linesep)\n\n # Load styles in dictionaries\n for style in soup.find_all(\"style\"):\n style_name = style.get(\"style:name\")\n #print \"style: %s children: %s descendants: %s\" % (str(style_name), str(len(list(style.children))), len(list(style.descendants)))\n for style_child in style.children:\n fs = style_child.get(\"fo:font-style\")\n if fs:\n self.style_fontstyle[style_name] = fs\n fontw = style_child.get(\"fo:font-weight\")\n if fontw:\n self.style_fontweight[style_name] = fontw\n # read alignment\n txta = style_child.get(\"fo:text-align\")\n if txta:\n self.style_textalignment[style_name] = txta\n # !!!\n tu = style_child.get(\"style:text-underline-type\")\n if tu:\n self.style_textunderline[style_name] = \"underlined\"\n # page break\n break_before = style_child.get(\"fo:break-before\")\n if break_before:\n 
self.style_break_before[style_name] = break_before\n\n\n # Navigate down the document through h and p tags\n #\n for text in soup.find_all(re.compile(\"^h|^p\")):\n\n # From bs4 docs: If a tag has only one child, and that child is a NavigableString, the child is made available as .string:\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"P9\">- Any text here!</text:p>\n #\n # To do:\n #\n # Beware of this case:\n # - <text:p text:style-name=\"P8\">\n # <text:span text:style-name=\"T4\">\n #\n\n # Get the attributes so the styles and the outlines\n text_attrs = dict(text.attrs)\n\n # Get the styles, if any\n try:\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n if text.string:\n t = unicode(text.string)\n if t:\n fw.write(self.outliner(self.stylizer(t, t_style), t_outline_level, t_style).encode('utf-8'))\n\n # e.g. page breaks come as a node with no children whose style contains fo:break-before:\"page\"\n elif len(list(text.children)) == 0:\n fw.write(self.outliner(unicode(\"\"), t_outline_level, t_style).encode('utf-8'))\n\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"Textbody\">\n # jkjksk skjkjkjs dhh\n # <text:s />\n # <text:span text:style-name=\"T3\">Bold</text:span>\n # <text:s />\n # </text:p>\n #\n # else drill down one level\n else:\n buffer = unicode(\"\")\n t = buffer\n u = buffer\n t_outline_level = \"paragraph\"\n t_style = \"\"\n for i in text.children:\n # Get the attributes so the styles\n try:\n text_attrs = dict(i.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n # whenever the element has no style\n # take the parent's one\n try:\n text_attrs = dict(i.parent.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n # if the current tag has only one child, and that child is a NavigableString\n if i.string:\n t = unicode(i.string)\n\n # space\n elif i.name == \"s\":\n t = unicode(\"&nbsp;\")\n\n # else drill down another level\n else:\n t = unicode(\"\")\n for j in i.children:\n if j.string:\n u = unicode(j.string)\n elif j.name == \"s\":\n u = unicode(\"&nbsp;\")\n else:\n u = unicode(\"\")\n if u:\n t = t + self.stylizer(u, t_style)\n\n # build up a unicode string containing the whole paragraph\n if t:\n buffer = buffer + self.stylizer(t, t_style)\n\n # outline the buffered unicode string and write it to the output file\n fw.write(self.outliner(buffer, t_outline_level, t_style).encode('utf-8'))\n\n fw.write(\"</body>\" + os.linesep)\n fw.write(\"</html>\" + os.linesep)\n fw.close()" ]
[ "0.58561695", "0.57505774", "0.57142305", "0.56927013", "0.5655105", "0.55894864", "0.5549124", "0.54334044", "0.5345397", "0.5345397", "0.5336243", "0.5295321", "0.52721536", "0.52331185", "0.52325106", "0.5207037", "0.5178504", "0.51673114", "0.5162166", "0.51004875", "0.5095659", "0.5095659", "0.5090703", "0.5081605", "0.50775087", "0.5074052", "0.5068945", "0.5053794", "0.50528896", "0.50479126", "0.5046279", "0.50376654", "0.50368756", "0.50286394", "0.5025347", "0.5020461", "0.50154626", "0.5015265", "0.5013307", "0.50108624", "0.5009648", "0.5003477", "0.49987894", "0.4981", "0.49658743", "0.49650964", "0.4953536", "0.49445885", "0.4940926", "0.4929024", "0.49254215", "0.49145317", "0.49114928", "0.49090517", "0.49031606", "0.4899631", "0.48968866", "0.48944473", "0.48933098", "0.48917288", "0.4889304", "0.48875928", "0.48856556", "0.48855928", "0.4873672", "0.4869349", "0.48658985", "0.4864761", "0.48615143", "0.48582673", "0.485744", "0.4855517", "0.4854661", "0.48539436", "0.48535016", "0.48502147", "0.48479328", "0.4847486", "0.4847088", "0.48455316", "0.48416904", "0.4841184", "0.4835481", "0.48261756", "0.4817517", "0.4808638", "0.48072398", "0.4804497", "0.48021996", "0.47953996", "0.47914502", "0.4785967", "0.4775197", "0.4771743", "0.47696176", "0.47670817", "0.47663277", "0.47649446", "0.4764133", "0.47565734", "0.4753665" ]
0.0
-1
>>> import shutil
>>> import os.path
>>> import core.docprocessor
>>> basepath = 'core/test_output'
>>> f = open('core/test/cv_1.doc', 'r')
>>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath)
>>> cv1.result
True
>>> os.path.isfile(os.path.join(cv1.markdown_path,
...     cv1.name.md))
True
>>> cv1.deleteconvert()
>>> os.path.isfile(os.path.join(cv1.markdown_path,
...     cv1.name.md))
False
>>> f.close()
>>> shutil.rmtree(basepath)
def deleteconvert(self):
    filename = os.path.join(self.docx_path, self.name.docx)
    if os.path.isfile(filename):
        os.remove(filename)
    filename = os.path.join(self.html_path, self.name.html)
    if os.path.isfile(filename):
        os.remove(filename)
    filename = os.path.join(self.docbook_path, self.name.xml)
    if os.path.isfile(filename):
        os.remove(filename)
    filename = os.path.join(self.markdown_path, self.name.md)
    if os.path.isfile(filename):
        os.remove(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n filename = os.path.join(asciitest_out_dir, save_cmake_filename(doc_file)).replace(\"\\\\\",\"/\")\n \n #print(\"cleanup %s %s\" % (doc_file, filename))\n try:\n os.remove(filename)\n except:\n pass", "def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")", "def clean_pdf():\n xnt.build.tex.clean(path=\"./\", remove_pdf=True)", "def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))", "def delete_file(self):\n os.remove(self.id+\"-input.txt\")\n if(self.lang == \"PYTHON\"):\n os.remove(self.id+\".py\")\n elif(self.lang == \"C\"):\n os.remove(self.id+\".c\")\n if(self.status == 1):\n os.remove(self.id+\"_c\")\n elif(self.lang == 'CPP'):\n os.remove(self.id+\".cpp\")\n if(self.status == 1):\n os.remove(self.id+\"_cpp\")\n elif(self.lang == 'JAVA'):\n os.remove(self.id+\".java\")\n if(self.status == 1):\n os.remove(self.id+\"_java\") \n elif(self.lang == \"JS\"):\n os.remove(self.id+\".js\")\n # if(self.status == 1):\n # os.remove(self.id+\"_js\")s", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def process_file_markdown(src_pathname):\n dest_pathname = path_src_to_dest(src_pathname, '.html')\n\n logging.info(\"Processing Markdown file: %s -> %s\" %\n (str(src_pathname), str(dest_pathname)))\n\n ensure_dest_dir(dest_pathname)\n\n with open(dest_pathname, 'w', encoding='UTF-8') as f:\n outstr = docgen.generate.generate_doc(str(src_pathname),\n verbose=config['verbose'],\n inlinecss=True,\n inlinewave=True,\n asdiv=False)\n f.write(outstr)\n\n return dest_pathname", "def clean():\n shutil.rmtree(BUILD_PATH, ignore_errors=True)\n shutil.rmtree(os.path.join(SOURCE_PATH, \"reference\", \"api\"), ignore_errors=True)", "def __del__(self):\n shutil.rmtree(self.epub_dir)", "def clean():\n shutil.rmtree(BUILD_PATH, ignore_errors=True)\n shutil.rmtree(\n os.path.join(SOURCE_PATH, \"reference\", \"api\"), ignore_errors=True\n )", "def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n 
print(\"Conversion successfull\")", "def setUp(self):\n self.outdir = \"tests/out/pdftotext\"\n if not os.path.exists(self.outdir):\n os.makedirs(self.outdir)\n else:\n files = glob.glob(self.outdir)\n for f in files:\n if os.path.isfile(f):\n os.remove(f)", "def main(base_path):\n current = os.getcwd()\n try:\n if not(os.path.exists(base_path)):\n ans = 'y'\n if p_out:\n print(\"Do you want to create \" + base_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n pass\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n else:\n m_path = os.path.join(base_path, 'nzmath/manual')\n if os.path.exists(m_path):\n ans = 'y'\n if p_out:\n print(\"Do you want to remove \" + m_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n for root, dirs, files in os.walk(m_path, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n dirname = os.path.join(base_path, 'nzmath/manual/modules')\n if not(os.path.exists(dirname)):\n os.makedirs(dirname)\n os.chdir(os.path.join(base_path, 'nzmath/manual/'))\n csspage = convertHPURL('manual/default.css')\n if p_out:\n print(\"get css from \" + csspage)\n retryConnection(urllib.request.urlretrieve, csspage, 'default.css')\n while ad_list:\n files = ad_list.pop()\n MyWikiParser(files).feeds()\n if p_out:\n print(\"\\n\" + \"All process is done!\" + \"\\n\")\n print(\"Ok, now created nzmath-current manual located to\")\n print(os.path.join(base_path, \"nzmath\"))\n print(\"if you check difference between nzmath-cvs manual, with GNU diff,\")\n print(\"$ diff -ubBr /tmp/nzmath/manual {your-nzmathcvs-repo}/manual\")\n print(\"or you check only new version files,\")\n print(\"$ diff -r --brief /tmp/nzmath/manual {your-nzmathcvs-repo}/manual .\")\n except NoneOutput:\n if p_out:\n print('end.')\n except InputError:\n print(\"Error: Invalid input!\")\n except LookupError:\n print(\"Error: Maybe, Japanese encodings(ex.euc_jp) is not supported.\")\n except:\n if p_out:\n print(\"Check \" + base_path + \" (dir? truly path? 
and so on.)\")\n print(\"Delete \" + base_path + \" and try again.\")\n print(\"(Maybe, caused by problem of network connection)\\n\")\n print(sys.exc_info()[0])\n os.chdir(current)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def cli(ctx):\n ctx.invoke(clean)\n ctx.invoke(build_convert_upload)", "def _finalize_iteration(self, verbose: bool):\n super().delete_remote_files()\n self.comm.storyteller.document_task(task=\"adam_documentation\")", "def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def clean_documents():\n start = datetime.now()\n for i, raw_filename in enumerate(os.listdir(RAW_DIR)):\n fullpath = os.path.join(RAW_DIR, raw_filename)\n if os.path.isfile(fullpath):\n print(\"Cleaning {0} {1}\".format(i, fullpath), file=stderr)\n try:\n with open(fullpath, \"r\") as f:\n text = f.read()\n text = clean(text)\n soup = BeautifulSoup(text, \"html.parser\")\n cleaned = visible_text(soup)\n score = germanwings_score(cleaned)\n if not score:\n print(\"not germanwings: {0}\".format(raw_filename))\n else:\n clean_filename = os.path.join(CLEAN_DIR, raw_filename)\n with open(clean_filename, \"w\") as f:\n f.write(cleaned.encode(\"ascii\", \"ignore\"))\n except Exception as exc:\n print(\"{0}: {1}\".format(fullpath, exc), file=stderr)\n end = datetime.now()\n print(\"Elapsed time to clean: {0}\".format(end - start), file=stderr)", "def clean():\n clean_files()", "def clean_pdf_dir():\n # Create the pdf directory if it does not exist\n if not os.path.isdir(pdf_dir):\n os.makedirs(pdf_dir)\n return\n\n # Get the pdf files list and remove them\n pdf_files = [f for f in os.listdir(pdf_dir) if f.lower().endswith('pdf')]\n for pdf_name in pdf_files:\n os.remove(os.path.join(pdf_dir, pdf_name))", "def clean(raw_file,clean_path,results_path):\n clean_file = clean_path + 'clean.pkl'\n stats_file = results_path + 'corpus_stats.pkl' \n raw_text = load_raw_text(raw_file) \n clean_docs = list()\n nlp = spacy.load('en')\n i = 0\n print('Cleaning documents...')\n for text in raw_text: \n words = nlp(text)\n raw_count = len(words)\n words = [w for w in words if not w.is_stop]\n words = [w for w in words if w.ent_type_ != 'PERSON' and w.pos_ != 'PROPN']\n words = [w for w in words if w.is_alpha and not w.is_digit]\n words = [w.lemma_ for w in words if w.text != '-PRON-']\n word_string = ' '.join(words)\n word_string = word_string.lower()\n doc = Document(word_string)\n doc.clean_count = len(words)\n doc.raw_count = raw_count\n clean_docs.append(doc)\n if i%10 == 0:\n print('Document: ' + str(i))\n i += 1\n clean_corpus = Corpus(clean_docs)\n clean_corpus.save_corpus_text(clean_file)\n 
clean_corpus.save_corpus_stats(stats_file)\n return clean_corpus", "def clean_android_target_pdf_dir():\n if os.path.exists(android_target_pdf_dir):\n shutil.rmtree(android_target_pdf_dir)", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def clean(ctx):\n header(clean.__doc__)\n with ctx.cd(ROOT):\n for pattern in CLEAN_PATTERNS:\n info(\"Removing {0}\", pattern)\n ctx.run(\"rm -rf {0}\".format(pattern))", "def test_no_delete(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def clean(ctx, so=False, cache=False):\n for name in ctx.shell.files('.', '.coverage*', recursive=False):\n ctx.shell.rm(name)\n for name in ctx.shell.files('bench', '.out.*', recursive=False):\n ctx.shell.rm(name)\n ctx.shell.rm_rf(\n 'docs/coverage',\n 'docs/gcov',\n 'build',\n 'dist',\n 'wheel/dist',\n ctx.doc.userdoc,\n 'docs/_userdoc/_build',\n ctx.doc.website.source,\n ctx.doc.website.target,\n )\n if cache:\n cacheclean(ctx)\n if so:\n soclean(ctx)", "def test_deleteInput(self):\n input1 = self.getArbitraryLoreInput(0)\n self.howtoDir.child(\"one.xhtml\").setContent(input1)\n self.builder.build(\"whatever\", self.howtoDir, self.howtoDir,\n self.templateFile, deleteInput=True)\n self.assertTrue(self.howtoDir.child('one.html').exists())\n self.assertFalse(self.howtoDir.child('one.xhtml').exists())", "def process_markdown(input_markdown, output_name, latex_img_dir = \"./\", input_path = \"./\", thumb_size=64):\n\tmd = markdown.Markdown( extensions=[ 'meta'\n\t , 'codehilite'\n\t , 'tables'\n\t , 'def_list'\n\t , 'footnotes'\n\t , ResourceExtractor({ \"resource_dir\": output_name\n\t , \"relative_path\": input_path\n\t })\n\t , AbstractExtractor()\n\t , ToCExtractor()\n\t , MathJaxExtension()\n\t , LaTeX({ \"latex_img_dir\": latex_img_dir\n\t , \"input_path\": input_path\n\t })\n\t ]\n\t )\n\t\n\t# Basic HTML conversion\n\thtml = md.convert(input_markdown)\n\t\n\t# Generate table of contents\n\ttoc = md.toc\n\t\n\t# Choose document title (default to the output name)\n\ttitle = output_name\n\t# Use the first heading if possible\n\tif len(toc) > 0:\n\t\ttitle = toc[0][1]\n\t# Better yet, get the explicitly given metadata\n\ttitle = md.Meta.get(\"title\", [title])[0]\n\t\n\t# Choose document subtitle (only available from metadata)\n\tsubtitle = md.Meta.get(\"subtitle\", [None])[0]\n\t\n\t# Get the image from the metadata\n\timg = md.Meta.get(\"img\", [None])[0]\n\timg_alt = md.Meta.get(\"img_alt\", [title])[0]\n\t\n\t# The abstract should be taken to be the first paragraph.\n\tabstract = md.abstract if md.abstract is not None else \"\"\n\t\n\t# Get the list of tags\n\ttags = md.Meta.get(\"tags\", [])\n\t\n\t# Get the list of files to include\n\tincludes = md.Meta.get(\"include\", [])\n\t\n\t# Get the show option\n\tshow = md.Meta.get(\"show\", [\"True\"])[0] == \"True\"\n\t\n\tfiles = md.resources\n\t\n\t# Add the article image to the list of files and create a thumbnail if\n\t# possible.\n\tif img is not None and img.startswith(\"file://\"):\n\t\timg = os.path.join(input_path, img[len(\"file://\"):])\n\t\timg_output_name = \"%s/%s\"%(output_name,\n\t\t unique(os.path.basename(img),\n\t\t [f.split(\"/\")[-1] for (_,f) in files]))\n\t\t\n\t\timg_thumbnail = 
\"%s.thumb.png\"%img\n\t\t\n\t\tp = Popen( [\"convert\"\n\t\t , img\n\t\t , \"-thumbnail\", \"%dx%d\"%(thumb_size,thumb_size)\n\t\t , img_thumbnail]\n\t\t , stdin = None\n\t\t , stdout = sys.stderr\n\t\t , stderr = sys.stderr\n\t\t )\n\t\tif p.wait() != 0:\n\t\t\traise Exception(\"Creating img thumbnail failed.\")\n\t\t\n\t\tfiles.append((img_thumbnail, img_output_name))\n\t\timg = img_output_name\n\t\n\t# Generate meta-data\n\tmeta_data = {\n\t\t\"url\" : output_name,\n\t\t\"title\" : title,\n\t\t\"subtitle\" : subtitle,\n\t\t\"img\" : img,\n\t\t\"img_alt\" : img_alt,\n\t\t\"abstract\" : abstract,\n\t\t\"tags\" : tags,\n\t\t\"show\" : show,\n\t}\n\t\n\treturn html, toc, meta_data, files, includes", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.protf_pdf:\n if os.path.isfile(instance.protf_pdf.path):\n os.remove(instance.protf_pdf.path)", "def clean(self):\n \n with current_context() as ctx:\n project_outputs = ctx.get('current.project_outputs')\n if project_outputs is not None:\n if self._project in project_outputs:\n del project_outputs[self._project]\n \n path = self.path\n if os.path.isfile(path):\n args = [self.command, '-f', path, '-t', 'clean', '-g']\n try:\n check_call(args)\n except CalledProcessError as ex:\n return ex.returncode\n self.remove()\n return 0", "def test_no_deletion(self):\n\t\tanalyse_text(self.filename)\n\t\tself.assertTrue(os.path.exists(self.filename))", "def clean(ctx):\n logger = logging.getLogger(__name__)\n\n root_project_dir = discover_conf_py_directory(ctx.obj[\"root_project_dir\"])\n dirnames = [\"py-api\", \"_build\", \"modules\", \"packages\", \"_doxygen\"]\n dirnames = [\n os.path.join(root_project_dir, dirname) for dirname in dirnames\n ]\n for dirname in dirnames:\n if os.path.isdir(dirname):\n shutil.rmtree(dirname)\n logger.debug(\"Cleaned up %r\", dirname)\n else:\n logger.debug(\"Did not clean up %r (missing)\", dirname)", "def cleanup(self): \n if os.path.exists(self.inpms):\n shutil.rmtree(self.inpms)", "def clean(self) -> None:\n if self.out_dir.exists():\n shutil.rmtree(self.out_dir)", "def clean_PDF(submission):\n src = submission.file_upload.file.name\n pdf1 = PdfFileReader(src)\n merger = PdfFileMerger(strict=False, )\n merger.append(pdf1, import_bookmarks=False)\n merger.addMetadata({'/Title': '',\n '/Author': '',\n '/Creator': '',\n '/Producer': ''})\n fd, temp_file = tempfile.mkstemp(suffix='.pdf')\n merger.write(temp_file)\n merger.close()\n os.close(fd)\n shutil.move(temp_file, src) # replace the original PDF on the server", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def clean_file(input_path, output_path):\n print(\"Comenzando limpieza...\")\n dc = DataCleaner(input_path)\n custom_cleaning_before_rules(dc)\n dc.clean(RULES)\n custom_cleaning_after_rules(dc)\n dc.save(output_path)\n print(\"Limpieza finalizada exitosamente!\")", "def clean(ctx):\n ctx.run(\"rm -rf build/html\")", "def post_trigger_clean(*args, **kwargs) -> None:\n with suppress(FileNotFoundError):\n shutil.rmtree(\"/tmp/cppcheck-152\")", "def clean():\n if os.path.exists('_build'):\n shutil.rmtree('_build')", "def handle_cleaning():\n extra_fl = ['changesInfos', 'printerSettings']\n fld_path = f'{output_path}/ppt'\n out_rel_path = f'{fld_path}/_rels/presentation.xml.rels'\n root, tree = gen_tree(out_rel_path)\n \n for i in extra_fl:\n shutil.rmtree(f'{fld_path}/{i}')\n \n for relation in root:\n 
attrib = relation.attrib\n if i in attrib['Target']:\n root.remove(relation)\n \n tree.write(out_rel_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n return", "def delete_document(self):\n pass", "def delete(tbd, tipe):\n des = \"Xblog/docs/\" + tbd.replace(\".ipynb\", \".html\")\n uninstall(des)\n if tipe == \"Xpage\":\n os.remove(des)\n ccc.success(\"deleting \" + des)\n des_pdf = des.replace(\".html\",\".pdf\").replace(\"notebooks\", \"pdfs\")\n os.remove(des_pdf)\n ccc.success(\"deleting \" + des_pdf)\n if tbd == \"notebooks/welcome.ipynb\":\n if os.path.isfile(\"Xblog/README.md\"):\n cnv.md2html()\n else:\n with open(\"Xblog/docs/notebooks/welcomme.html\", 'w') as f:\n f.write(\"<html>\\n<body>\\n<h1 align=\\\"center\\\">Welcome to Xbooks blogs!</h1>\\n<h4 align=\\\"center\\\">This blog has no welcome page<br/>if you're maintainer of this blog, kindly write either README.md or notebooks/welcome.ipynb file!</h4>\\n</body>\\n</html>\\n\")\n f.close()\n if tipe == \"Xbook\":\n shutil.rmtree(des)\n ccc.success(\"deleting \" + des)\n return True", "def process(text, output_dir, file_name, json_output):\n\t\n\t# Process HTML\n\tprocessed_text_html = process_html(text)\n\t# Write processed HTML output \n\t#pre_proc.create_text_file(output_dir + \"/html_\" + file_name + \"_pre.html\", processed_text_html)\n\n\t# Convert HMTL to MD\n\ttext_md = pre_proc.extract_text_md(processed_text_html)\n\n\t# Process MD\n\tprocessed_text_md = process_md(text_md)\n\t\n\tif(json_output):\n\t\t# Convert MD to JSON\n\t\tprocessed_json = pre_proc.convert_md_to_json(processed_text_md, file_name)\n\t\t# Write processed JSON output \n\t\tpre_proc.create_binary_file(output_dir + \"/\" + file_name + \".json\", processed_json)\n\telse:\n\t\t# Write processed MD output \n\t\tpre_proc.create_text_file(output_dir + \"/\" + file_name + \".md\", processed_text_md)", "def convert(self):\n logger.info('Convert: %s' % self.base)\n if self.mimetype in ['application/msword',\n \"application/vnd.openxmlformats-officedocument\"\n \".wordprocessingml.document\"]:\n if 'multipart/related' in self.stream:\n self.process_mht()\n returncode = self.convert_docfile(self.docx_path, self.name.docx,\n self.docbook_path, self.name.xml)\n else:\n returncode = self.convert_docfile(self.source_path, self.name,\n self.docbook_path, self.name.xml)\n if returncode is False:\n returncode = self.convert_docfile(self.source_path, self.name,\n self.docx_path, self.name.docx)\n returncode = self.convert_docfile(self.docx_path, self.name.docx,\n self.docbook_path, self.name.xml)\n if not os.path.exists(os.path.join(\n self.docbook_path, self.name.xml)):\n logger.info('Not exists')\n self.resultcode = 2\n return False\n if returncode is False:\n self.resultcode = 3\n return False\n self.remove_note()\n self.file_docbook_to_markdown()\n logger.info(' '.join([self.base.base, self.name.base, 'Success']))\n self.resultcode = 0\n return True\n else:\n logger.info('Skip')\n self.resultcode = 1\n return False", "def run(self) -> None:\n\n result = subprocess.run(self.pandoc, check=True).returncode\n if result:\n print(f'pandoc error: {result}')\n sys.exit(1)", "def test_text_classifier_del(self):\n pass", "def main():\n\n parser = argparse.ArgumentParser(\n description='Convert collection of bibtex files to clean markdown script.')\n parser.add_argument('-i', '--input', nargs='+', required=False,\n help='Input bibtex files. 
Defaults to bib/*')\n parser.add_argument('-o', '--output', default='publications.md', required=False,\n help='Output markdown floadile.')\n args = parser.parse_args()\n\n # Build setup\n os.makedirs(BUILD_ROOT, exist_ok=True)\n build_dir = os.path.join(\n BUILD_ROOT,\n datetime.now().strftime(datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')))\n os.makedirs(build_dir)\n\n # Find and merge all bibtex files\n input_files = args.input\n if input_files is None:\n input_files = [os.path.join(BIB_ROOT, f) for f in os.listdir(BIB_ROOT)\n if os.path.isfile(os.path.join(BIB_ROOT, f))]\n merge_file = merge_bibtex(input_files, build_dir)\n\n # Parse merged bibtex file with custom options\n parse_file = parse_bibtex(merge_file, build_dir)\n\n # Render bibtex to markdown\n render_file = render_bibtex(parse_file, build_dir, args.output)", "def preview_file_cleanup(sender, **kwargs):\n\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def teardown(self):\n self.file_comm.remove_file()\n super(TestCisAsciiFileOutput, self).teardown()", "def cleanup_file(path_to_file):\n print \"Removing generated file: %s\" % path_to_file\n os.remove(path_to_file)", "def tearDown(self):\n rmtree(self.output_path)\n rmtree(self.content_path)", "def clean(obj):\n clean_up_generated_files(obj)", "def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc", "def delete(self,result):\n path = self.get_archive_file_path(result) if isinstance(result,RunResults) else result\n if os.path.exists(path):\n os.remove(path)", "def delete(self):\n\n del self.parent_mirror_dir[self.cvs_path]", "def re_process(self):\n rmtree(self.processed_dir)\n os.makedirs(self.processed_dir)\n self.process()\n\n print('Done!')", "def run():\n assert os.path.exists(args.input_path), \"input_path doesn't exist\"\n assert os.path.exists(args.output_path), \"output_path doesn't exist\"\n\n # read all the paths to the input documents\n doc_files = []\n for root, dirs, files in os.walk(args.input_path):\n for file in files:\n if not file.endswith('gz') and not file.endswith('xml'):\n continue\n doc_files.append(os.path.join(root, file))\n print('{} medline files found from {}'\n ''.format(len(doc_files), args.input_path))\n\n print('converting...')\n pool = Pool(processes=args.num_workers)\n total_doc = 0\n total_batch = 0\n total_empty = 0\n for d, b, n in tqdm(pool.imap_unordered(partial(convert), doc_files),\n total=len(doc_files)):\n total_doc += d\n total_batch += b\n total_empty += n\n\n print('total docs: {}, total batches: {} created (empty doc {})'\n ''.format(total_doc, total_batch, total_empty))", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.document:\n if os.path.isfile(instance.document.path):\n os.remove(instance.document.path)", "def do_single_file_preprocess(pdf_file):", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)", "def clean(self):\r\n\r\n for _, data in 
self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def _cleanup(self):\n # delete stdout/stderr\n if os.path.isfile(self.stdout):\n os.unlink(self.stdout)", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def clean(self):\n\t\tself.archiver.closeFile()", "def cleanUp(self):\n print(\" cleaning up\",self.folderSave)\n for fname in glob.glob(self.folderSave+\"/*.*\"):\n if not fname.endswith(\".npy\") and not fname.endswith(\".csv\"):\n print(\" deleting\",os.path.basename(fname))\n os.remove(fname)", "def deleteOutputPath(filePath):\n if os.path.exists(filePath):\n shutil.rmtree(filePath)", "def __remove_base_directory__():\n p = subprocess.Popen('rm -rf {}/.wcscanner'.format(context.__BASE_PATH__), shell=True)\n p.wait()", "def clean_up(self):\n os.system(f'rm -r {self.submission_folder_path}')\n\n return", "def process_pdf(pdf):\n\n if os.path.exists(legend_images_dir):\n subprocess.call([\"rm\", \"-rf\", legend_images_dir])\n os.makedirs(legend_images_dir)\n\n if os.path.exists(plot_images_dir):\n subprocess.call([\"rm\", \"-rf\", plot_images_dir])\n os.makedirs(plot_images_dir)\n\n if os.path.exists(csv_output_dir):\n subprocess.call([\"rm\", \"-rf\", csv_output_dir])\n os.makedirs(csv_output_dir)\n\n if os.path.exists(pdf_output_dir):\n subprocess.call([\"rm\", \"-rf\", pdf_output_dir])\n os.makedirs(pdf_output_dir)\n\n genImages(pdf)", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def decompile():\n #list of files to decompile and results decompile\n dataprocessor_files = []\n\n #list of files to decompile and results decompile for 1C v7.7\n dataprocessor_files_v7 = []\n\n #list of files to decompile and results decompile for 1C MD\n dataprocessor_files_MD = []\n\n #set the exit code\n exit_code = 0\n\n #Find datapocessor files\n for filename in get_list_of_comitted_files():\n #Check the file extensions\n logging.info(\"file to check %s\" % filename)\n if filename[-3:] == \"ert\":\n dataprocessor_files_v7.append(filename)\n logging.info(\"file %s\" % filename)\n continue \n if filename[-3:] in ['.MD','.md']:\n dataprocessor_files_MD.append(filename)\n logging.info(\"file %s\" % filename)\n continue \n\n dirsource = os.path.abspath(os.path.join(os.path.curdir, \"src\"))\n curabsdirpath = os.path.abspath(os.path.curdir) \n\n if len(dataprocessor_files) > 0:\n #pathbin1c = \"C:\\\\Program Files\\\\1cv82\\8.2.17.153\\\\bin\\\\1cv8.exe\"\n #pathbin1c = \"c:\\\\Program Files (x86)\\\\1cv8\\\\8.3.4.304\\\\bin\\\\1cv8.exe\"\n pathbin1c = get_path_to_1c()\n\n if len(dataprocessor_files_v7) > 0:\n for filename in dataprocessor_files_v7:\n print(\"ert file %s\" % filename)\n #TODO: добавить копирование этих же файлов в каталог src/имяфайла/...\n #get file name.\n fullpathfile = os.path.abspath(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n fullbasename = os.path.basename(filename)\n newdirname = os.path.dirname(filename)\n\n print(\"ert file %s\" % fullpathfile )\n\n #Скопируем сначало просто структуру каталогов.\n if not os.path.exists(dirsource):\n os.makedirs(dirsource)\n #для каждого файла определим новую 
папку.\n newsourcepath = os.path.join(dirsource, newdirname)\n newpath2 = os.path.join(newsourcepath, basename)\n if not os.path.exists(newsourcepath):\n logging.info(\"create new dir %s\" % newsourcepath)\n os.makedirs(newsourcepath)\n #print(\"curabsdirpath %s\" % curabsdirpath)\n #print(\"newpath2 %s\" % newpath2)\n #print(\"basename %s\" % basename)\n\n t1 = format(\"gcomp -q -d -F %s -D %s -v --no-ini --no-version --no-empty-mxl\" % (filename, newsourcepath))\n result = subprocess.check_call(['cmd.exe', '/C', t1]) \n #изменим кодировку cp1251 на utf-8 \n #утилита iconv.exe должна запускаться в cmd = добавлена в PATH\t\t\t\n #файлов 1s, mdp, frm, txt\n t3 = 'bash .git/hooks/convert_utf8.sh {0}'.format( newpath2 )\n print(\"t3 = %s\" % t3)\n logging.info(\"CONVERT: %s\" % t3)\n result = subprocess.check_call(['cmd.exe', '/C', t3])\n #result = subprocess.check_call(['git', 'add', '--all', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.1s', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.frm', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.mxl', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.utf', newsourcepath])\n if not result == 0:\n logging.error(result)\n exit(result)\n\n if len(dataprocessor_files_MD) > 0:\n for filename in dataprocessor_files_MD:\n print(\"MD file %s\" % filename)\n #TODO: добавить копирование этих же файлов в каталог src/имяфайла/...\n #get file name.\n fullpathfile = os.path.abspath(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n fullbasename = os.path.basename(filename)\n newdirname = os.path.dirname(filename)\n \n #Скопируем сначало просто структуру каталогов.\n if not os.path.exists(dirsource):\n os.makedirs(dirsource)\n #для каждого файла определим новую папку.\n newsourcepath = os.path.join(dirsource, newdirname, \"MD\")\n if not os.path.exists(newsourcepath):\n logging.info(\"create new dir %s\" % newsourcepath)\n os.makedirs(newsourcepath)\n newpath2 = os.path.join(newsourcepath, basename)\n print(\"fullbasename %s\" % fullbasename)\n print(\"newdirname %s\" % newdirname)\n print(\"newsourcepath %s\" % newsourcepath)\n \n t1 = format(\"gcomp -d -v -F %s -D %s\" % (filename, newsourcepath))\n result = subprocess.check_call(['cmd.exe', '/C', t1])\n\n #изменим кодировку cp1251 на utf-8 \n #утилита iconv.exe должна запускаться в cmd = добавлена в PATH\t\t\t\n #файлов 1s, mdp, frm, txt\n t3 = 'bash .git/hooks/convert_utf8.sh {0}'.format( newsourcepath )\n print(\"t3 = %s\" % t3)\n logging.info(\"CONVERT: %s\" % t3)\n result = subprocess.check_call(['cmd.exe', '/C', t3])\n\n #result = subprocess.check_call(['git', 'add', '--all', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.1s', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.frm', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.mxl', newsourcepath])\n result = subprocess.check_call(['git', 'add', '*.utf', newsourcepath])\n if not result == 0:\n logging.error(result)\n exit(result)", "def delete_file(self, name, container):\r\n try:\r\n cnt = self.get_container(container)\r\n obj = cnt.get_object(name)\r\n obj.delete()\r\n return True\r\n except:\r\n return False", "def remove(self):\n self.remove_file()", "def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)", "def test_buildRemovesTemporaryLaTeXFiles(self):\n sections = range(1, 4)\n for sectionNumber in sections:\n self.howtoDir.child(\"%d.xhtml\" % 
(sectionNumber,)).setContent(\n self.getArbitraryLoreInput(sectionNumber))\n bookTeX = self._setupTeXBook(sections)\n bookPDF = FilePath(self.mktemp())\n\n builder = BookBuilder()\n builder.build(self.howtoDir, [self.howtoDir], bookTeX, bookPDF)\n\n self.assertEqual(\n set(self.howtoDir.listdir()),\n set([bookTeX.basename()] + [\"%d.xhtml\" % (n,) for n in sections]))", "def cleanUp(self, f):\n os.system('rm ' + f)", "def clean(path=None):\n conf.load(path)\n logger.info('cleaning output...')\n helpers.rmdir(conf.get('build_path'))\n logger.info('done')", "def delete_file(input_fn):\r\n if os.path.isfile(input_fn):\r\n os.remove(input_fn)", "def process(self):\n\n self.copy_supported_files()\n tex = self.replace_includes(file_name=self.source_tex_file)\n\n try:\n tex = self.replace_bibliography(tex)\n except Exception, ex:\n print '[e] exception {}'.format(str(ex))\n print '[i] bibliography was not replaced - run firstly \"build-latex-win\\\\_build.bat\" for bbl file'\n\n self.dir_helper.save_file(file_name=self.dest_tex_file, text=tex)\n self.create_bat_file()\n\n print '\\n[i] publication is ready - IN CASE NO ERROR ACCURED'\n print '[i] check foloder: \"{}\"\\n'.format(self.temp_dir)", "def test_remove(self):\n reposDir = self.makeRepository(self.tmpDir)\n testFile = reposDir.child(\"some-file\")\n testFile.setContent(b\"something\")\n self.commitRepository(reposDir)\n self.assertTrue(testFile.exists())\n\n self.createCommand.remove(testFile)\n testFile.restat(False) # Refresh the file information\n self.assertFalse(testFile.exists(), \"File still exists\")", "def cleanup(self):\n\n # check if the directory exists\n if not os.path.exists(self.path):\n return\n\n # check if the directory is a directory\n if not os.path.isdir(self.path):\n return\n\n # loop over content of directory and remove it\n for the_file in os.listdir(self.path):\n file_path = os.path.join(self.path, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n pass", "def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)", "def main():\n if os.path.exists(\"docs\"):\n try:\n shutil.move(\"docs\", \"html\")\n except FileNotFoundError as err:\n print(err)\n\n if os.path.exists(\"html\"):\n try:\n native_cmd(\"html\\make.bat html\")\n\n except Exception as err:\n print(err)\n\n try:\n shutil.move(\"html\", \"docs\")\n except FileNotFoundError as err:\n print(err)", "def test() -> None:\n docx2python(\"resources/example.docx\")", "def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass", "def delete(self):\n if self.is_running:\n raise errors.ChalmersError(\"Can not remove running program (must be stopped)\")\n\n if path.isfile(self.definition_filename):\n os.unlink(self.definition_filename)\n\n if path.isfile(self.state_filename):\n os.unlink(self.state_filename)", "def delete(self, filename):\n pass", "def _removeFile(self, filename):\n try:\n #delete the output file\n os.remove(filename)\n except:\n #print (\"Failed to remove the file: \" + filename)\n pass", "def teardown(self):\n super(TestCisObjOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def remove_file(self, path):\n pass", "def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n 
pass", "def createStructuredTranscript_Non_Core_Doc():\n\n #create a temporary folder that will hold the data transformed from doc to docx\n os.system('mkdir ' + INPUT_FOLDER+'temp')\n\n core_doc_asset = []\n missing_count = 0\n missing_files=[]\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.doc\"):\n\n # RG numbers for the core asset\n if (\"RG-50.030\" not in file and\n \"RG-50.106\" not in file and\n \"RG-50.549\" not in file):\n \n\n \n # convert file to docx, storing it in an untracked folder called temp\n file_docx = file + 'x'\n command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]\n call(command, shell=True)\n\n # append to the array\n core_doc_asset.append(file_docx)\n \n\n \n\n # get the units for each file, store them and update tracker\n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n \n not_processed=0\n processed_doc=0\n \n # get the units for each file, store them and update tracker \n for mongo_rg in core_doc_asset:\n # get text units for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n \n \n \n units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_doc\")\n\n not_processed=not_processed+1\n\n if False in processed:\n\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n not_processed=not_processed+1\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n else:\n # insert units on the output collection\n h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n \n\n #delete the temporary folder\n os.system('rm -r ' + INPUT_FOLDER+'temp')\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')\n file.write('\\n'.join(missing_files))\n\n \n # success\n pprint.pprint(\"Non-core doc files were successfully processed, but there are \" + str(missing_count) + \" missing\")", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)", "def Destroy(self, *args):\n return _XCAFDoc.XCAFDoc_DocumentTool_Destroy(self, *args)" ]
[ "0.65258646", "0.63766783", "0.62430525", "0.6167361", "0.5848033", "0.57737476", "0.5623585", "0.5583894", "0.5582149", "0.5558321", "0.5547012", "0.55358094", "0.553127", "0.5530632", "0.5524219", "0.5511022", "0.54836005", "0.5472407", "0.5460167", "0.53966826", "0.53966826", "0.53895587", "0.5382919", "0.53824633", "0.53743327", "0.53707415", "0.53637016", "0.53601897", "0.5357064", "0.5347869", "0.53025913", "0.5297966", "0.5279094", "0.52684706", "0.5267001", "0.5264792", "0.52328026", "0.5230032", "0.5225895", "0.5222929", "0.5212038", "0.5193036", "0.51803595", "0.51769876", "0.5175859", "0.5170355", "0.5154868", "0.51472163", "0.51448005", "0.51444805", "0.51443756", "0.51430357", "0.5131076", "0.51261294", "0.5123208", "0.5120937", "0.51190627", "0.51070076", "0.5096183", "0.50827724", "0.5077676", "0.5072062", "0.5067842", "0.5062707", "0.50626266", "0.50617975", "0.50610477", "0.5058231", "0.50569457", "0.5056352", "0.5051475", "0.5051061", "0.5050663", "0.50484765", "0.50478995", "0.5041954", "0.5038049", "0.5035857", "0.50086856", "0.5008216", "0.5001835", "0.49995995", "0.49978715", "0.4986488", "0.49833953", "0.49812177", "0.4979659", "0.49701467", "0.49623886", "0.49592796", "0.49548835", "0.4952686", "0.4952638", "0.49503967", "0.49447477", "0.49408072", "0.49337867", "0.4928287", "0.4921359", "0.4920257" ]
0.73595536
0
Read ascii file to get weather info
def read_weather(self):
        print "Reading weather data from file",self.datafile
        tab = ascii.read(self.datafile)

        # Fix 'T' values in precipitation column, which represent tiny
        # amounts of rain (not measurable)
        TINY_VALUE = '.005'  # 0.005 is half the smallest measurable value

        rain = tab['PrecipitationIn']
        wbad = (rain == 'T')
        rain[wbad] = TINY_VALUE
        rain = numpy.array(rain).astype("float")

        # Replace string version of precip with float version
        tab['PrecipIn'] = rain
        tab.remove_column('PrecipitationIn')

        self.table = tab
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content", "def read_ascii(file):\n wvlen, band, mag, emag, fmag, unit, beam, odate, ref = [],[],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n mag.append(float(line.strip().split(' ')[2]))\n emag.append(line.strip().split(' ')[3])\n fmag.append(line.strip().split(' ')[4])\n unit.append(line.strip().split(' ')[5])\n beam.append(line.strip().split(' ')[6])\n odate.append(line.strip().split(' ')[7])\n ref.append(line.strip().split(' ')[8])\n \n return wvlen, band, mag, emag, fmag, unit, beam, odate, ref", "def read_imp_ASCII(filename):\n\n # create a temporary directory\n tmpDir = tempfile.mkdtemp()\n\n # unzip filename to tmpDir\n with zipfile.ZipFile(filename, 'r') as inZip:\n inZip.extractall(tmpDir)\n\n # set filenames\n dt_file = os.path.join(tmpDir, 'DateTime.txt')\n location_file = os.path.join(tmpDir, 'LatLon.txt')\n bx_file = os.path.join(tmpDir, 'BX.txt')\n by_file = os.path.join(tmpDir, 'BY.txt')\n bz_file = os.path.join(tmpDir, 'BZ.txt')\n obx_file = os.path.join(tmpDir, 'obsBX.txt')\n oby_file = os.path.join(tmpDir, 'obsBY.txt')\n obz_file = os.path.join(tmpDir, 'obsBZ.txt')\n station_file = os.path.join(tmpDir, 'Stations.txt')\n\n DT = _read_antti_datetime(dt_file)\n\n Lat, Lon, Rad, Label = _read_antti_location(location_file)\n\n BX = _read_antti_component(bx_file)\n BY = _read_antti_component(by_file)\n BZ = _read_antti_component(bz_file)\n\n obsX = _read_antti_component(obx_file)\n obsY = _read_antti_component(oby_file)\n obsZ = _read_antti_component(obz_file)\n\n obsLat, obsLon, obsRad, obsInc, obsID = _read_antti_stations(station_file)\n\n shutil.rmtree(tmpDir)\n\n return (DT, (Lat, Lon, Rad), BX, BY, BZ, Label,\n (obsLat, obsLon, obsRad), obsX, obsY, obsZ, obsInc, obsID)", "def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data", "def read_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data/' + self.city[2] + '-' + self.application + '.csv.bz2'\n self.dataset = loadtxt(fname, skiprows=1,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(0, 1, 2, 3), delimiter=';', comments='#')", "def read():\n # TODO", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def read_data_nmt():\n data_dir = download_extract('fra-eng')\n with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:\n return f.read()", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def read_from_file(self, filename: str) -> None:", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n 
with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def read_weather(self, path='../datasets/McClear/s7_clr_data_17-19.csv'):\n s_clr = pd.read_csv(path)\n times = pd.date_range('03-16-2018 16:00', freq='15min', periods=96*2, tz=\"UTC\") #\n weather = pd.DataFrame(columns=['ghi', 'dni', 'dhi'], index=times)\n weather['dni'] = np.array(s_clr['BNI'])\n weather['ghi'] = np.array(s_clr['GHI'])\n weather['dhi'] = np.array(s_clr['DHI'])\n return weather", "def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True", "def read_data(self, loc):\n pass", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def wac_to_dict(file_path: str) -> dict:\n\n weather_dict = {'longitude': '',\n 'latitude': '',\n 'altitude': '',\n 'time': [],\n 'temperature': [],\n 'relative_humidity': [],\n 'horizontal_global_solar_radiation': [],\n 'diffuse_horizontal_solar_radiation': [],\n 'air_pressure': [],\n 'vertical_rain': [],\n 'wind_direction': [],\n 'wind_speed': [],\n 'cloud_index': [],\n 'atmospheric_counter_horizontal_long_wave_radiation': [],\n 'atmospheric_horizontal_long_wave_radiation': [],\n 'ground_temperature': [],\n 'ground_reflectance': []\n }\n\n file_obj = open(file_path, 'r')\n file_lines = file_obj.readlines()\n file_obj.close()\n\n weather_dict['longitude'] = float(file_lines[4].split('\\t')[0].strip())\n weather_dict['latitude'] = float(file_lines[5].split('\\t')[0].strip())\n weather_dict['altitude'] = float(file_lines[6].split('\\t')[0].strip())\n\n for line in file_lines[12:]:\n splitted_line = line.split('\\t')\n weather_dict['time'].append(datetime.datetime.strptime(splitted_line[0].strip(), '%Y-%m-%d %H:%M'))\n weather_dict['temperature'].append(float(splitted_line[1].strip()))\n weather_dict['relative_humidity'].append(float(splitted_line[2].strip()))\n weather_dict['horizontal_global_solar_radiation'].append(float(splitted_line[3].strip()))\n weather_dict['diffuse_horizontal_solar_radiation'].append(float(splitted_line[4].strip()))\n weather_dict['air_pressure'].append(float(splitted_line[5].strip()))\n 
weather_dict['vertical_rain'].append(float(splitted_line[6].strip()))\n weather_dict['wind_direction'].append(float(splitted_line[7].strip()))\n weather_dict['wind_speed'].append(float(splitted_line[8].strip()))\n weather_dict['cloud_index'].append(float(splitted_line[9].strip()))\n weather_dict['atmospheric_counter_horizontal_long_wave_radiation'].append(float(splitted_line[10].strip()))\n weather_dict['atmospheric_horizontal_long_wave_radiation'].append(float(splitted_line[11].strip()))\n weather_dict['ground_temperature'].append(float(splitted_line[12].strip()))\n weather_dict['ground_reflectance'].append(float(splitted_line[13].strip()))\n\n return weather_dict", "def read_file(log_file):\n\t\tfile = open(log_file, 'r')\n\t\tresult = []\n\t\twhile 1:\n\t\t\tcontent = file.readline()\n\t\t\tif not content:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tdata = content.split(\"\\003\")\n\t\t\t\tif len(data) == 13:\n\t\t\t\t\ttrack = {\n\t\t\t\t\t\t\t 'device_no' : long(data[0][3:]), 'sim' : data[1], 'type':int(data[2]), 'gps_time' : MongoTrack.time_trans(data[3]),\n\t\t\t\t\t\t\t 'valid' : data[4], 'loc':{'long' : float(data[5]), 'lat' : float(data[6]) }, 'altitude' : float(data[7]),\n\t\t\t\t\t\t\t 'speed' : float(data[8]), 'course' : float(data[9]), 'km' : float(data[10]), 'para' : float(data[11]),\n\t\t\t\t\t\t\t 'rtime' : MongoTrack.time_trans(data[12].strip())\n\t\t\t\t\t\t\t}\n\t\t\t\t\tresult.append(track)\n\t\tfile.close()\n\t\treturn result", "def read_file(path_to_file):\n 8", "def get_weather_data(lat, lon):\n\n # Get weather\n filedata = pvtoolslib.get_s3_filename_df()\n filedata_closest = nsrdbtools.find_closest_datafiles(float(lat), float(lon),\n filedata)\n\n filename = filedata_closest['filename'].iloc[0]\n\n if filename == '124250_37.93_-122.3.npz':\n weather, info = nsrdbtools.get_local_weather_data(filename)\n else:\n weather, info = pvtoolslib.get_s3_weather_data(filename)\n\n return weather, info", "def read(path):", "def __read():\n f = file(constellation_data_path)\n constellations = []\n for line in f:\n tokens = line.split()\n if not tokens: continue\n hip_numbers = [int(t) for t in tokens[2:]]\n element = tokens[0], zip(hip_numbers[::2], hip_numbers[1::2])\n constellations.append(element)\n f.close()\n return constellations", "def readtxt(obslog):\n\n logger = log.getLogger('obslog.readtxt')\n\n if not os.path.exists(obslog):\n logger.error('Cannot access %s', obslog)\n raise SystemExit\n\n logger.info('Reading %s', obslog)\n\n with open(obslog) as f: # Since we will have to go through the data twice, read the whole file at once.\n data = f.readlines()\n\n header = ['Observation ID', 'Data Labels', 'File Numbers', 'Dataset UT', 'Target Name', 'Filters', 'Slit',\n 'Grating/Wavelength', 'Camera/Prism', 'ExpTime/LNR/Coadds', 'ACQ']\n\n pattern = dict() # Enforce formatting rules to avoid parsing comments as data:\n pattern['Observation ID'] = re.compile(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}-[0-9]+$')\n pattern['Data Labels'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['File Numbers'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['Dataset UT'] = re.compile(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}$') # 09:58:15\n pattern['Target Name'] = re.compile(r'[a-zA-Z0-9_-]+') # Match any string\n pattern['Filters'] = re.compile(r'[A-Z0-9\\-]+') # H, XD, H2, X, J, H\n pattern['Slit'] = re.compile(r'[a-zA-Z0-9]+') # 0.675, ACQ, LgPin\n pattern['Grating/Wavelength'] = re.compile(r'[0-9]{2,3}/[0-9]\\.[0-9]{2}') # 32/1.65, 111/1.68\n 
pattern['Camera/Prism'] = re.compile(r'[A-Z]{2}/[A-Z]{3}') # LB/MIR, SB/SXD\n pattern['ExpTime/LNR/Coadds'] = re.compile(r'[0-9]+\\.[0-9]/[0-9]+/[0-9]+') # 0.2/1/25, 300.0/32/1\n pattern['ACQ'] = re.compile(r'^Y*$') # Y or ''\n\n indx = {}\n for line in data:\n if 'Electronic Observing Log' in line:\n date = line.split()[-1][7:]\n logger.debug('Log date: %s', date)\n if line[0:14] == 'Observation ID': # This defines the start of the header row\n for h in header:\n indx[h] = line.find(h) # Find where each column starts\n break # No need to go farther\n\n width = {} # Find the width of each row\n for i in range(len(header) - 1): # This requires that 'header' be an ordered array (not a dictionary)\n width[header[i]] = indx[header[i + 1]] - indx[header[i]]\n width[header[i+1]] = 1 # The ACQ field is either 'Y' or blank\n\n val = {}\n match = {}\n info = {}\n for line in data:\n logger.debug('\\n%s', line)\n files = []\n for h in header:\n val[h] = line[indx[h]: indx[h] + width[h]].strip()\n match[h] = re.match(pattern[h], val[h])\n logger.debug('%s: \"%s\" %s' % (h, val[h], match[h]))\n\n # Maybe throw a warning if only match 1 fails; indicating a likely bad pattern specification?\n\n if None in match.values():\n logger.debug('Failed to match all patterns -> This is a comment')\n continue\n\n if '-' in val['File Numbers']:\n start, stop = val['File Numbers'].split('-')\n for i in range(int(start), int(stop)+1):\n files.append(i)\n else:\n files.append(int(val['File Numbers']))\n\n for filenum in files:\n f = 'N%sS%04d.fits' % (date, filenum)\n logger.debug('File: %s', f)\n info[f] = {}\n for h in [header[0]] + header[3:]: # Skip 'Data Labels' and \"File Numbers'\n info[f][h] = val[h]\n\n logger.debug('info: %s', info)\n return info", "def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels", "def readTempSensor(sensorName) :\n f = open(sensorName, 'r')\n lines = f.readlines()\n f.close()\n return lines", "def open_and_read_file():\n file_path = sys.argv[1]\n #print file_path\n file_data = open(file_path, 'r')\n gettysburg = file_data.read()\n\n return gettysburg", "def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes", "def read_from_ascii(self, filename):\n self.ascii_filename = 
filename\n # read file content into a string\n f=open(filename,'r')\n file_str=f.read()\n f.close()\n # make dictionary with file content\n reg_exp_data_groups=re.compile(r'^#>>(\\w+):.*\\n',re.M)\n file_dict=self.make_data_dict_from_str(reg_exp_data_groups,file_str)\n # read arrays ------------------------------\n self.x=np.loadtxt(StringIO.StringIO(file_dict['x']))\n self.p=np.loadtxt(StringIO.StringIO(file_dict['p']))\n self.fmci_XP=np.loadtxt(StringIO.StringIO(file_dict['XP']))\n # regular expression for extracting parameter=value\n reg_exp_param_val=re.compile(r'\\n*(\\w+)=',re.M)\n # read params_physics -----------------------\n params_physics_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_physics'])\n self.name=self.__get_particle_name(params_physics_dict['particle'])\n self.time=float(params_physics_dict['time'])\n # read params_TDC ---------------------------\n params_TDC_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_TDC'])\n self.calc_id=params_TDC_dict['calc_id']\n self.i_ts=int(params_TDC_dict['i_ts'])", "def _read_antti_stations(station_file):\n if station_file.split('.')[-1] == 'gz':\n ff = gzip.open(station_file, 'r')\n else:\n ff = open(station_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n # extract and convert single line with observatory IDs\n obsList = []\n llList = []\n incList = []\n nObs = 0\n nLL = 0\n nInc = 0\n for line in sIO:\n if re.search(b\"^%\", line):\n # skip comments\n continue\n\n if re.search(br\"^\\s*$\", line):\n # skip blank lines\n continue\n\n # first line of consequence should be a list of quoted strings holding\n # observatory IDs for observatories considered in this solution; convert\n # to a list of strings\n if len(obsList) == 0:\n obsList = re.sub(b'\\'', b'', line).split()\n nObs = len(obsList)\n continue\n\n # assume next nobs lines read are observatory locations\n if nLL < nObs:\n llList.append([float(elem) for elem in line.decode().split()])\n nLL = nLL+1\n continue\n\n # assume next nobs lines read are observatory inclusion (boolean) lists\n if nInc < nObs:\n #incList.append(line.strip())\n incList.append([int(elem) for elem in line.decode().strip()])\n nInc = nInc+1\n continue\n\n # close sIO\n sIO.close()\n\n if len(llList) > 2:\n obsLat, obsLon, obsRad = list(zip(*llList))\n elif len(llList) == 2:\n obsLat, obsLon = list(zip(*llList))\n obsRad = np.ones(obsLat.shape)\n else:\n raise Exception('Requires (at least) latitude and longitude')\n\n obsInc = list(zip(*incList))\n\n return (np.array(obsLat), np.array(obsLon), np.array(obsRad),\n np.array(obsInc), np.array(obsList))", "def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()", "def _extract_raw_data(self, lines):\r\n\r\n i = self._find_first_data_point(lines)\r\n if self._lines_have_temperature(lines[i]):\r\n self._T = []\r\n\r\n if self._has_drift_points(lines):\r\n while i < len(lines) and lines[i][0] in ['+', '-']:\r\n self._extract_drift_point(lines[i])\r\n i += 2\r\n i += self._extract_next_forc(lines[i:])\r\n i += 1\r\n else:\r\n while i < len(lines) and lines[i][0]in ['+', '-']:\r\n i += self._extract_next_forc(lines[i:])\r\n self._extract_drift_point(lines[i-1])\r\n i += 1\r\n\r\n return", "def read_FMI_weather(ID, start_date, end_date, sourcefile, CO2=380.0):\n \n # OmaTunniste;OmaItä;OmaPohjoinen;Kunta;siteid;vuosi;kk;paiva;longitude;latitude;t_mean;t_max;t_min;\n # rainfall;radiation;hpa;lamposumma_v;rainfall_v;lamposumma;lamposumma_cum\n # -site number\n # -date 
(yyyy mm dd)\n # -latitude (in KKJ coordinates, metres)\n # -longitude (in KKJ coordinates, metres)\n # -T_mean (degrees celcius)\n # -T_max (degrees celcius)\n # -T_min (degrees celcius)\n # -rainfall (mm)\n # -global radiation (per day in kJ/m2)\n # -H2O partial pressure (hPa)\n\n sourcefile = os.path.join(sourcefile)\n\n #ID = int(ID)\n\n # import forcing data\n fmi = pd.read_csv(sourcefile, sep=';', header='infer', \n usecols=['OmaTunniste', 'Kunta', 'aika', 'longitude',\n 'latitude', 't_mean', 't_max', 't_min', 'rainfall',\n 'radiation', 'hpa', 'lamposumma_v', 'rainfall_v'],\n parse_dates=['aika'],encoding=\"ISO-8859-1\")\n \n time = pd.to_datetime(fmi['aika'], format='%Y%m%d')\n\n fmi.index = time\n fmi = fmi.rename(columns={'OmaTunniste': 'ID', 'longitude': 'lon',\n 'latitude': 'lat', 't_mean': 'T', 't_max': 'Tmax',\n 't_min': 'Tmin', 'rainfall': 'Prec',\n 'radiation': 'Rg', 'hpa': 'h2o', 'lamposumma_v': 'dds',\n 'rainfall_v': 'Prec_a'})\n \n fmi['h2o'] = 1e-1*fmi['h2o'] # hPa-->kPa\n fmi['Rg'] = 1e3 / 86400.0*fmi['Rg'] # kJ/m2/d-1 to Wm-2\n fmi['Par'] = 0.5*fmi['Rg']\n\n # saturated vapor pressure\n esa = 0.6112*np.exp((17.67*fmi['T']) / (fmi['T'] + 273.16 - 29.66)) # kPa\n vpd = esa - fmi['h2o'] # kPa\n vpd[vpd < 0] = 0.0\n rh = 100.0*fmi['h2o'] / esa\n rh[rh < 0] = 0.0\n rh[rh > 100] = 100.0\n\n fmi['RH'] = rh\n fmi['esa'] = esa\n fmi['VPD'] = vpd\n\n fmi['doy'] = fmi.index.dayofyear\n fmi = fmi.drop(['aika'], axis=1)\n # replace nan's in prec with 0.0\n #fmi['Prec'][np.isnan(fmi['Prec'])] = 0.0\n fmi['Prec']= fmi['Prec'].fillna(value=0.0)\n # add CO2 concentration to dataframe\n fmi['CO2'] = float(CO2)\n \n # get desired period\n fmi = fmi[(fmi.index >= start_date) & (fmi.index <= end_date)]\n# if ID > 0:\n# fmi = fmi[fmi['ID'] == ID]\n return fmi", "def read_metar_ZA(metar_url, date_as_ISO_text=False):\n\n \n metar_list = [] # The list of dictionaries that will be returned, containing METAR data\n \n # Regular expressions to extract the wind\n re_wind_no_gust = re.compile(r'(?P<direction>[0-9]{3,3})(?P<spd>[0-9]{2,2})KT') # 10005KT\n re_wind_gust = re.compile(r'(?P<direction>[0-9]{3,3})(?P<spd>[0-9]{2,2})G(?P<gust>[0-9]{2,2})KT') # 10005G15KT\n re_wind_variable = re.compile(r'(?P<direction>VRB)(?P<spd>[0-9]{2,2})KT') # VRB05KT\n re_no_data = re.compile(r'No Data For (?P<missing>[A-Z,a-z]{4,4})', re.IGNORECASE) # No data for FAGC\n re_temp = re.compile(r' (?P<temp>[M]?[0-9]{2,2})+/(?P<dewpt>[M]?[0-9]{2,2}) ') #temp in format 20/12 or 20/M02 or M03/M10 etc. \n re_qnh = re.compile(r'Q(?P<qnh>[0-9]{3,4})')\n \n \n # Retrieve the webpage containing METAR data\n try:\n r = requests.get(metar_url, verify=False)\n except:\n current_app.logger.error(f\"Error retrieving METAR - failed at REQUESTS call\")\n return None\n \n \n # If error retrieving page, return None\n if r.status_code != 200: \n current_app.logger.error(f\"Error retrieving METAR: URL = {metar_url}: {r.status_code} - {r.reason}\")\n return None\n \n # Setup Beautiful Soup, and extract all the \"PRE\" tags - these are where the METAR data is stored\n soup = BeautifulSoup(r.text, 'html.parser')\n mets = soup.find_all('pre')\n \n #Connect to DB\n sess = sqa_session()\n \n # Loop through the individual METAR\n for met in mets:\n \n # Get just the text. Sould be: similar to: 'View DecodedMETAR FAOR 100530Z 19015KT CAVOK 15/M03 Q1020 NOSIG='\n met_string = str(met.text)\n \n is_speci = False # Is this a SPECI and not a METAR - default to False\n is_correction = False #Is this METAR a correction of an earlier (i.e. 
'METAR COR xxxxxxxxx')\n \n # Determine if this is a METAR, a SPECI, or a line to be ignored\n s = met_string.find('METAR') # Is it a METAR?\n \n # If text not found, this is not a METAR - is it a SPECI?\n if s < 0:\n s = met_string.find('SPECI') # Is it a SPECI\n\n if s >= 0: # It is a speci\n is_speci = True\n \n else: # It's not a SPECI either, so continue to the next element\n continue\n\n s += 5 # 5 is the length of the text METAR and SPECI - we want to remove this.\n # Remove METAR/SPECI text - we should now have the raw METAR/SPECI only (eg. 'FAOR 100530Z 19015KT CAVOK 15/M03 Q1020 NOSIG=')\n met_string = met_string[s:].strip()\n \n # If this METAR is a Correction, then flag and remove the 'COR ' (eg: METAR COR FAHS 011200Z AUTO 30009KT 34/02 Q1017=\n if met_string[:4] == 'COR ':\n is_correction = True\n met_string = met_string[4:]\n \n # Extract aerodrome name\n aerodrome = met_string[:4]\n # Get aerodrome NavPoint - contains coordinates\n aero_point = sess.query(NavPoint).filter(NavPoint.ICAO_Code == aerodrome).first()\n \n # If aerdrome not found, this is a non-aerodrome station - ignore it (May implement later)\n if not aero_point:\n continue\n \n # Get the date and time\n day = int(met_string[5:7])\n hr = int(met_string[7:9])\n mn = int(met_string[9:11])\n \n met_date = calc_metar_taf_date(day, hr, mn)\n \n #Get the winds\n wind_variable = False # Wind defaults to not light and variable\n wind_gust = 0 # Gust defaults to 0\n no_wind = False #Is there no wind data avail (i.e. /////KT)\n \n \n #Check whether there is now wind specified (i.e. /////KT)\n if met_string.find('///KT') > 0:\n no_wind = True\n wind_dir = 0\n wind_spd = 0\n else:\n \n # Use regular expression to try to extract non-gusting wind (eg. 10010KT)\n tmp = re_wind_no_gust.search(met_string)\n if tmp:\n try:\n wind_dir = tmp.group('direction')\n wind_spd = tmp.group('spd')\n except:\n current_app.logger.error(f\"Error passing METAR winds: {met_string}\")\n \n # Use regular expression to try to extract gusting wind (eg. 10010G15KT)\n elif re_wind_gust.search(met_string):\n tmp = re_wind_gust.search(met_string)\n try:\n wind_dir = tmp.group('direction')\n wind_spd = tmp.group('spd')\n wind_gust = tmp.group('gust')\n except:\n current_app.logger.error(f\"Error passing METAR wind GUSTING: {met_string}\")\n \n # Use regular expression to try to extract variable wind (eg. VRB02KT)\n elif re_wind_variable.search(met_string):\n tmp = re_wind_variable.search(met_string)\n try:\n wind_dir = -1\n wind_spd = tmp.group('spd')\n wind_variable = True\n except:\n current_app.logger.error(f\"Error passing METAR wind VARIABLE: {met_string}\")\n\n # Use regular expression to try to extract Temp and Dewpoint (eg. 25/M02)\n temperature = 0\n dew_point = 0\n\n tmp = re_temp.search(met_string)\n if tmp:\n try:\n temperature = int(tmp.group('temp').replace('M','-'))\n dew_point = int(tmp.group('dewpt').replace('M','-'))\n except:\n current_app.logger.error(f\"Error passing METAR temperature: {met_string}\")\n\n\n # Use regular expression to try to extract QNH (eg. 
Q1025)\n qnh = 1013\n \n tmp = re_qnh.search(met_string)\n if tmp:\n try:\n qnh = tmp.group('qnh')\n except:\n current_app.logger.error(f\"Error passing METAR QNH: {met_string}\")\n \n if date_as_ISO_text == True:\n met_date = datetime.isoformat(met_date)\n \n met_dict = {'aerodrome': aerodrome , 'coords': (aero_point.Longitude, aero_point.Latitude), \n 'has_no_data': False , 'is_speci': is_speci, 'is_correction': is_correction, 'time': met_date, \n 'wind': {'no_wind_data': no_wind, 'direction': wind_dir, 'speed': wind_spd, 'gusting': wind_gust, 'is_variable': wind_variable}, #(wind_dir, wind_spd, wind_gust, wind_variable) , \n 'temperature': temperature, 'dew_point': dew_point,\n 'qnh': qnh,\n 'body': met_string}\n \n metar_list.append(met_dict)\n \n # Check for any stations with no data - search the whole page\n aero_no_datas = re_no_data.findall(soup.text)\n # If there are stations with no data, iterate through them\n if aero_no_datas:\n for aerodrome in aero_no_datas:\n # Get aerodrome NavPoint - contains coordinates\n aero_point = sess.query(NavPoint).filter(NavPoint.ICAO_Code == aerodrome).first()\n \n # If aerdrome not found, this is a non-aerodrome station - ignore it (May implement later)\n if not aero_point:\n continue\n \n # Add a disctionary item\n met_dict = {'aerodrome': aerodrome , 'coords': (aero_point.Longitude, aero_point.Latitude) , \n 'has_no_data': True, 'body': f'No data for {aerodrome}'}\n \n metar_list.append(met_dict)\n\n return metar_list", "def _read_input_file(self):\n file_type = 'np.array'\n with open(self._file_properties['file_name'], 'r') as in_file:\n for line in in_file.readlines():\n if line[0:5] == '$$SOE':\n file_type = 'Horizons'\n break\n\n if not isfile(self._file_properties['file_name']):\n msg = 'Horizons files {:} does not exists.'\n message = msg.format(self._file_properties['file_name'])\n raise FileExistsError(message)\n if file_type == 'Horizons':\n self._read_horizons_file()\n else:\n (time, x, y, z) = np.loadtxt(\n self._file_properties['file_name'],\n usecols=(0, 1, 2, 3), unpack=True)\n self._time = time\n if int(astropy_version[0]) >= 4:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation_type='cartesian')\n else:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation='cartesian')", "def read_file(self):\n # This is quite ugly but works for now.\n self.header = read_csv(self.file_name, delim_whitespace=True,\n header=TrackData.header_line,\n nrows=1).to_dict(orient='index')[0]\n self.data = read_csv(self.file_name, delim_whitespace=True, \n header=TrackData.data_line)", "def _get_temp_raw(self):\n try: \n f = open(self.device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\n except: \n print(\"ERROR: w1_slave file could not be opened (temp sensor)\")", "def readobservationfile(observationdatafile=FileSettings.settingsdict['observationdatafile']):\r\n with open(observationdatafile, 'r') as obs_file:\r\n global contents\r\n contents = obs_file.readlines()\r\n global obs_data, time_difference, obs_time\r\n obs_data = []\r\n obs_time = []\r\n for line in contents:\r\n linelist = list(line)\r\n if linelist[0] == ';' or linelist[0] == ' ' or len(list(line)) < 15:\r\n continue\r\n else:\r\n templine = line.split()\r\n if float(templine[-1]) < 0:\r\n obs_data.append(0)\r\n else:\r\n obs_data.append(float(templine[-1]))\r\n day_templine_preprocessing = line.replace(' ', ';')\r\n day_templine = re.split('[/|;|:|\\t]', day_templine_preprocessing)\r\n month = int(day_templine[0])\r\n day = int(day_templine[1])\r\n year = 
int(day_templine[2])\r\n hour = int(day_templine[3])\r\n minute = int(day_templine[4])\r\n second = int(day_templine[5])\r\n if day_templine[6] == 'PM' and hour != 12:\r\n hour = hour + 12\r\n elif day_templine[6] == 'AM' and hour == 12:\r\n hour = 0\r\n obs_time.append(datetime.datetime(year, month, day, hour, minute, second))\r\n time_difference = obs_time[1] - obs_time[0]\r\n return time_difference", "def get_weather_data(filename, dates, highs, lows, date_index, high_index,\n low_index):\n with open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n # Get data temp.\n for row in reader:\n current_date = datetime.strptime(row[date_index], '%Y-%m-%d')\n try:\n high = int(row[high_index])\n low = int(row[low_index])\n except ValueError:\n print(f\"No data for {current_date}\")\n else:\n dates.append(current_date)\n highs.append(high)\n lows.append(low)", "def read(self, infname):\n InArr = np.loadtxt(infname)\n inlon = InArr[:,0]\n inlat = InArr[:,1]\n inZ = InArr[:,2]\n self.mask = ~self.mask\n for i in xrange(inlon.size):\n if i%10000==0: print i\n lon=inlon[i]\n if lon < 0: lon+=360\n lat=inlat[i]\n index = np.where((self.lonArr==lon)*(self.latArr==lat))\n if inZ[i]==0 or math.isnan(inZ[i]): continue\n self.mask[index[0], index[1]]=False\n self.Zarr[index[0], index[1]]=inZ[i]\n return", "def read_weather_data(path):\n df = pd.read_csv(path, compression='infer', header=None, index_col=False,\n names=['station_id',\n 'measurement_date',\n 'measurement_type',\n 'measurement_flag',\n 'quality_flag',\n 'source_flag',\n 'observation_time'],\n parse_dates=['measurement_date'])\n df = df[pd.isna(df['quality_flag'])]\n weather_data_subset = df[df.measurement_type.isin(['PRCP', 'SNOW', 'SNWD', 'TMAX', 'TMIN'])][[\n 'station_id', 'measurement_date', 'measurement_type', 'measurement_flag']]\n return weather_data_subset", "def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])", "def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))", "def read_py_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data-py/Data/' + self.city[2] + '-py.data.bz2'\n self.dataset = loadtxt(fname, skiprows=0,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(4, 3, 5, 2), delimiter=';', comments='*')", "def _read_data(self, txtfile):\n data_string = open(txtfile,'r').read()\n return data_string", "def loadascii(self, filename=None):\n data = np.loadtxt(filename)\n if len(data.shape) == 1:\n self.flux = data\n elif len(data.shape) == 2:\n self.wavelength = data[:,0]\n self.flux = data[:,1]", "def read_FMI_weatherdata(forcfile, fyear,lyear, asdict=False):\n \n #OmaTunniste;OmaItä;OmaPohjoinen;Kunta;siteid;vuosi;kk;paiva;longitude;latitude;t_mean;t_max;t_min;\n #rainfall;radiation;hpa;lamposumma_v;rainfall_v;lamposumma;lamposumma_cum\n #-site number\n #-date (yyyy mm dd)\n #-latitude (in KKJ coordinates, metres)\n #-longitude (in KKJ coordinates, metres)\n #-T_mean 
(degrees celcius)\n #-T_max (degrees celcius)\n #-T_min (degrees celcius)\n #-rainfall (mm)\n #-global radiation (per day in kJ/m2)\n #-H2O partial pressure (hPa)\n\n from datetime import datetime\n #forcfile='c:\\\\pyspace\\\\DATAT\\\\Topmodel_calibr\\\\FMI_saa_Porkkavaara.csv'\n\n #import forcing data\n dat=np.genfromtxt(forcfile,dtype=float,delimiter=';', usecols=(5,6,7,10,11,12,13,14,15,16))\n\n fi=np.where(dat[:,0]>=fyear); li=np.where(dat[:,0]<=lyear)\n ix=np.intersect1d(fi,li); #del fi, li\n #print min(ix), max(ix), np.shape(ix)\n tvec=dat[ix,0:3] #YYYY MM DD\n\n dat=dat[ix, 3:] \n\n time=[]; doy=[]\n for k in range(0,len(tvec)):\n time.append(datetime( int(tvec[k,0]), int(tvec[k,1]), int(tvec[k,2]), 0, 0) )\n doy.append(time[k].timetuple().tm_yday)\n \n time=np.array(time)\n doy=np.array(doy)\n \n Ta=dat[:,0];Tmax=dat[:,1]; Tmin=dat[:,2]; Prec=dat[:,3]; Rg=1e3*dat[:,4]/86400.0; Par=Rg*0.5 #from kJ/m2/d-1 to Wm-2 \n e=1e-1*dat[:,5]; #hPa-->kPa\n dds=dat[:,6] #temperature sum\n\n #saturated vapor pressure \n esa=0.6112*np.exp((17.67*Ta)/ (Ta +273.16 -29.66)) #kPa\n vpd=esa - e; #kPa \n vpd[vpd<0]=0.0\n rh=100.0*e/esa;\n rh[rh<0]=0.0; rh[rh>100]=100.0\n \n F={'Ta':Ta, 'Tmin':Tmin, 'Tmax':Tmax, 'Prec':Prec, 'Rg':Rg, 'Par': Par, 'VPD':vpd, 'RH':rh, 'esa':esa, 'h2o':e, 'dds':dds}\n\n F['time']=time\n F['doy']=doy\n \n ix=np.where(np.isnan(F['Prec'])); \n F['Prec'][ix]=0.0\n #del dat, fields, n, k, time\n \n if asdict is not True:\n #return pandas dataframe\n F=pd.DataFrame(F)\n cols=['time', 'doy', 'Ta', 'Tmin','Tmax', 'Prec', 'Rg', 'Par', 'VPD', 'RH', 'esa', 'h2o', 'dds']\n F=F[cols]\n return F", "def get_weather(self, time=None, location=None):\n req = requests.get(self.source_url)\n text = req.text\n moment = self.extract_datetime(text)\n met_data = self.parse_hms_data(text)\n met_data['time'] = moment\n met_data['text'] = text\n return self.source_label, met_data", "def read_w1(onewire_temperature_c, sensor_mappings):\n base_dir = '/sys/bus/w1/drivers/w1_slave_driver/'\n\n # Get our device:\n path_mappings = {}\n for (directory, dirs, files) in os.walk(base_dir):\n for dev_dir in dirs:\n try:\n #id_file = open('{0}/{1}/id'.format(base_dir, dev_dir), 'r')\n #id_val = id_file.read().encode('hex').upper()\n id_val = dev_dir\n #id_file.close()\n therm_file = open('{0}/{1}/w1_slave'.format(base_dir, dev_dir), 'r')\n path_mappings[id_val] = therm_file\n except (OSError, IOError) as e:\n print('Skipping {0} due to error: {1}'.format(dev_dir, str(e)), file=sys.stderr)\n break\n\n while 1:\n for device_id, therm_file in path_mappings.items():\n therm_contents = therm_file.read()\n therm_file.seek(0)\n\n m = re.search(r't=(-?\\d+)$', therm_contents)\n if m:\n temperature = (float(m.group(1)) / 1000)\n # A reading of 85000 seems to mean \"it's not working\". 
If you actually want to\n # measure things that are 85°C, then my apologies.\n if temperature != 85:\n onewire_temperature_c.labels(location=sensor_mappings[device_id]).set(temperature)\n\n time.sleep(1)", "def readSanchez(file, planet, species, state):\n # open and read whole file\n f = open(file, 'rU')\n lines = f.readlines()\n f.close()\n\n # start reading planet files for specific mol and state \n parsing = False\n for i in np.arange(len(lines)):\n if lines[i].startswith(planet):\n parsing = True\n elif lines[i].startswith('\\t\\t\\t'):\n parsing = False\n if parsing and lines[i].startswith(species + '\\t' + state):\n data = lines[i].split()\n\n return data", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'Gauge':\n self.read_metgauge(line, f)\n elif self.cleantag(line) == 'ObservationData':\n self.read_obsgauge(line, f)\n # Next line\n line = f.nexttag()", "def read_input_file(filename=\"wires.txt\"):\n raw_wires = []\n with open(filename, 'r') as wirefile:\n raw_wires = wirefile.readlines()\n return raw_wires", "def f2c_file_read_function():\n with open('data.txt', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n F = float(data[-1][-1]) # last item in data should be value\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def read_rain_gauge_sunnyside_school():\n\twith urllib.request.urlopen('https://raw.githubusercontent.com/selassid/codeguild/master/sunnyside.rain') as sunnyside_rain:\n\t\traw_unparsed_byte_stream_url_source = [byte_line.decode('utf-8') for byte_line in sunnyside_rain]\n\t# end with block/close byte stream\n\treturn raw_unparsed_byte_stream_url_source", "def read_txt(path):\n mz = []\n i = []\n with open(path) as f:\n for line in f:\n line = line.split()\n mz.append(float(line[0]))\n i.append(float(line[1]))\n return mz, i", "def readtle(file, catalog=np.zeros(1)):\n # Adapted from Mr. Brett Pantalone's MATLAB readtle function.\n # INPUTS:\n # file - Path to any standard two-line element file.\n # catalog - Optional array of NORAD catalog numbers for the satellites of\n # interest. The default action is to display data from every\n # satellite in the file.\n \n # Brett Pantalone\n # mailto:[email protected]\n # http://research.ece.ncsu.edu/osl/\n if(catalog==np.zeros(1)):\n catalog = np.array([])\n \n try:\n fd = open(file)\n except(IOError):\n file = file[0:-3]+\".tle\"\n fd = open(file)\n \n assert(os.path.isfile('./'+file), \"File doesn''t exist in this directory.\")\n \n kiter = 0\n A0 = fd.readline().rstrip()\n A1 = fd.readline().rstrip()\n A2 = fd.readline().rstrip()\n oe = np.array([0, 0, 0, 0, 0, 0])\n epoch = np.array([0, 0])\n \n try:\n while(isinstance(A2,str)==1):\n kiter+=1\n satnum = np.array([float(A1[2:6])])\n if(catalog.size==0 or ismember(satnum,catalog)==1):\n if(kiter==1):\n print('-'*50)\n print('Satellite: %s' % A0)\n assert(chksum(A1), 'Checksum failure on line 1')\n assert(chksum(A2), 'Checksum failure on line 2')\n print(\"Catalog Number: %f\" % satnum)\n epochyear = np.array([float('20'+A1[18:20])])\n epochday = np.array([float(A1[20:31])])\n epoch = np.array([epochyear,epochday])\n print(\"Epoch time: %s\" % A1[18:31]) #YYDDD.DDDDDDDD\n inc = np.array([float(A2[8:15])])\n print(\"Inclination: %f deg\" % inc)\n raan = np.array([float(A2[17:24])])\n print(\"Right Ascension of the Ascending Node: %f deg\" % raan)\n ecc = np.array([float('.' 
+ A2[26:32])])\n print(\"Eccentricity: %f\" % ecc)\n aop = np.array([float(A2[34:41])])\n print(\"Argument of perigee: %f deg\" % aop)\n M = np.array([float(A2[43:50])])\n print(\"Mean Anomaly: %f deg\" % M)\n n = np.array([float(A2[52:62])])\n print(\"Mean motion: %f rev/day\" % n)\n T = 86400/n;\n print(\"Period of rev: %.0f s/rev\" % T)\n a = (((T/(2*np.pi))**2)*3.986004e+14)**(1/3);\n print(\"Semimajor axis: %.0f meters\" % a)\n b = a*(1 - ecc**2)**(0.5)\n print(\"Semiminor axis: %.0f meters\" % b)\n oe = np.array([a, ecc, inc, raan, aop, M])\n elif(kiter>1):\n print('-'*50)\n print('Satellite: %s' % A0)\n assert(chksum(A1), 'Checksum failure on line 1')\n assert(chksum(A2), 'Checksum failure on line 2')\n print(\"Catalog Number: %f\" % satnum)\n epochyear = np.array([float('20'+A1[18:20])])\n epochday = np.array([float(A1[20:31])])\n epoch_new = np.array([epochyear,epochday])\n print(\"Epoch time: %s\" % A1[18:31]) #YYDDD.DDDDDDDD\n inc = np.array([float(A2[8:15])])\n print(\"Inclination: %f deg\" % inc)\n raan = np.array([float(A2[17:24])])\n print(\"Right Ascension of the Ascending Node: %f deg\" % raan)\n ecc = np.array([float('.' + A2[26:32])])\n print(\"Eccentricity: %f\" % ecc)\n aop = np.array([float(A2[34:41])])\n print(\"Argument of perigee: %f deg\" % aop)\n M = np.array([float(A2[43:50])])\n print(\"Mean Anomaly: %f deg\" % M)\n n = np.array([float(A2[52:62])])\n print(\"Mean motion: %f rev/day\" % n)\n T = 86400/n;\n print(\"Period of rev: %.0f s/rev\" % T)\n a = (((T/(2*np.pi))**2)*3.986004e+14)**(1/3);\n print(\"Semimajor axis: %.0f meters\" % a)\n b = a*(1 - ecc**2)**(0.5)\n print(\"Semiminor axis: %.0f meters\" % b)\n oe_new = np.array([a, ecc, inc, raan, aop, M])\n oe = np.concatenate((oe,oe_new), axis=1)\n epoch = np.concatenate((epoch,epoch_new),axis=1)\n A0 = fd.readline().rstrip()\n A1 = fd.readline().rstrip()\n A2 = fd.readline().rstrip()\n except:\n fd.close()\n return oe, epoch", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": 
originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def ReadReachedSymbols(filename):\n with open(filename, 'r') as f:\n return [line.strip() for line in f.readlines()]", "def f(filename):\n f = open(filename,\"r\",encoding=\"latin1\") # Note: this encoding could be wrong!\n all_data = f.read()\n f.close()\n print(\"\\n\\nall_data is\", all_data, \"\\n\\n\")\n return all_data[42:43] # just for fun", "def readascii(file_name):\n data = np.loadtxt(file_name)\n z = data[0,1:]\n nuInu = data[1:,1:]\n lmu = data[1:,0]\n return EBL(z, lmu, nuInu)", "def get_weather_data():\n get_pronto_data()\n zp = zipfile.ZipFile('open_data_year_one.zip')\n file_handle = zp.open('2015_weather_data.csv')\n return pd.read_csv(file_handle)", "def _read_horizons_file(self):\n # Read in the file\n self._get_start_end()\n data = np.genfromtxt(\n self._file_properties['file_name'],\n dtype=[('date', 'S17'), ('ra_dec', 'S23'), ('distance', 'f8'),\n ('foo', 'S23')],\n delimiter=[18, 29, 17, 24], autostrip=True,\n skip_header=self._file_properties['start_ind'] + 1,\n skip_footer=(self._file_properties['line_count'] -\n self._file_properties['stop_ind']))\n\n # Fix time format\n for (i, date) in enumerate(data['date']):\n data['date'][i] = Utils.date_change(date)\n\n # Currently we assume HORIZONS works in UTC.\n dates = [text.decode('UTF-8') for text in data['date']]\n self._time = Time(dates, format='iso', scale='utc').tdb.jd\n\n ra_dec = [text.decode('UTF-8') for text in data['ra_dec']]\n xyz = SkyCoord(\n ra_dec, distance=data['distance'], unit=(u.hourangle, u.deg, u.au))\n self._xyz = xyz.cartesian", "def acquire_data(city):\n\n filename = FILENAME_TEMPLATE.format(city)\n\n text = open(filename).read()\n lines = text.splitlines()\n\n data_lines = lines[1:]\n\n return data_lines", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\TvInfo\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def read_satellite(filename, ftype):\n #ftype = 'l3c'\n #filename = '/gws/nopw/j04/cds_c3s_sst/output/v2.6.0/l3c/AVHRR19_G/2018/03/01/20180301120000-C3S-L3C_GHRSST-SSTskin-AVHRR19_G-ICDR2.0_day-v02.0-fv01.0.nc'\n #ftype = 'l4'\n #filename = '/gws/nopw/j04/cds_c3s_sst/public/data/ICDR_v2/Analysis/L4/v2.0/2018/01/01/20180101120000-C3S-L4_GHRSST-SSTdepth-OSTIA-GLOB_ICDR2.0-v02.0-fv01.0.nc'\n print \"Reading %s file: %s\" % (ftype, filename)\n \n # Read data - L4 or L3C (note L4 mask and L3C quality level have same array name)\n ncin = netCDF4.Dataset(filename)\n if ftype == 'l4':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n sst = ncin.variables['analysed_sst'][:]\n unc = ncin.variables['analysis_uncertainty'][:]\n sea_ice_frac = ncin.variables['sea_ice_fraction'][:]\n ql = ncin.variables['mask'][:]\n sstfill = ncin.variables['analysed_sst']._FillValue\n sstao = ncin.variables['analysed_sst'].add_offset\n sstsf = ncin.variables['analysed_sst'].scale_factor\n elif ftype == 'l3c':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n time_bnds = ncin.variables['time_bnds'][:]\n sst = ncin.variables['sea_surface_temperature'][:]\n sst_depth = ncin.variables['sea_surface_temperature_depth'][:]\n sst_dtime = 
ncin.variables['sst_dtime'][:]\n sst_depth_dtime = ncin.variables['sst_depth_dtime'][:]\n sses_bias = ncin.variables['sses_bias'][:]\n sses_sd = ncin.variables['sses_standard_deviation'][:]\n sst_depth_total_unc = ncin.variables['sst_depth_total_uncertainty'][:]\n l2p_flags = ncin.variables['l2p_flags'][:]\n ql = ncin.variables['quality_level'][:]\n wind_speed = ncin.variables['wind_speed'][:]\n large_scale_cor_unc = ncin.variables['large_scale_correlated_uncertainty'][:]\n synop_cor_unc = ncin.variables['synoptically_correlated_uncertainty'][:]\n uncor_unc = ncin.variables['uncorrelated_uncertainty'][:]\n adj_unc = ncin.variables['adjustment_uncertainty'][:]\n aerosol_dyn_ind = ncin.variables['aerosol_dynamic_indicator'][:]\n sens = ncin.variables['sensitivity'][:]\n tfill = ncin.variables['sst_dtime']._FillValue\n sstfill = ncin.variables['sea_surface_temperature']._FillValue\n sstao = ncin.variables['sea_surface_temperature'].add_offset\n sstsf = ncin.variables['sea_surface_temperature'].scale_factor\n else:\n print 'ftype not recognised or supported'\n \n # Create time field\n # -> If L4 then create a time field set to time in L4 file\n # -> Also add a time fill value to keep coding simple later on\n if ftype == 'l4':\n time = np.empty((7200,3600))\n time[:,:] = time_read\n tfill = -2147483648\n else:\n time = copy.deepcopy(sst_dtime) # Need to make a hard copy\n mask = sst_dtime.mask == False; mask = mask[0,:,:]\n row, col = np.where(mask==True)\n time.data[0, row, col] = time.data[0,row, col] + time_read\n \n # Create output structure\n if ftype == 'l4':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n sst=sst,\n unc=unc,\n sea_ice_frac=sea_ice_frac,\n ql=ql,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n elif ftype == 'l3c':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n time_bnds=time_bnds,\n sst=sst,\n sst_depth=sst_depth,\n sst_dtime=sst_dtime,\n sst_depth_dtime=sst_depth_dtime,\n sses_bias=sses_bias,\n sses_sd=sses_sd,\n sst_depth_total_unc=sst_depth_total_unc,\n l2p_flags=l2p_flags,\n ql=ql,\n wind_speed=wind_speed,\n large_scale_cor_unc=large_scale_cor_unc,\n synop_cor_unc=synop_cor_unc,\n uncor_unc=uncor_unc,\n adj_unc=adj_unc,\n aerosol_dyn_ind=aerosol_dyn_ind,\n sens=sens,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n else:\n print 'ftype not recognised or supported'\n \n return data", "def read_raw_file(file):\n print(\"Reading\", file)\n output = {}\n\n with open(file, \"r\") as fin:\n fin.readline()\n for line in fin:\n line_split = line.split(\",\")\n date = datetime.datetime.strptime(line_split[3], \"%Y-%m-%d %H:%M:%S\")\n line = [str(date), \"a_\" + line_split[1], \"r_\" + line_split[2], \"wd_\" + str(date.weekday())]\n if eval(line_split[0]) not in output:\n output[eval(line_split[0])] = []\n output[eval(line_split[0])].append(line)\n return output", "def read_cities(file_name):\n stream = open(file_name)\n data = stream.readlines()\n stream.close()\n roadmap = []\n for city_info in data: # For each record in data file\n if city_info != \"\\n\": # Ignore new line characters\n city_info = clean_data(city_info) # Clean the record\n roadmap.append(city_info) # Add each cleaned record to a list\n return roadmap", "def read (cls, fileName):\n out = cls ()\n \n with open(fileName) as fid:\n for line in fid:\n line = line.strip()\n \n if line == 'ENERGY':\n cls._scan_energy (out, fid)\n elif line == 'TRANSMISSION':\n cls._scan_matrix (out, fid, 'TE_op')\n elif line == 'CURRENT':\n 
cls._scan_matrix (out, fid, 'I_op')\n elif line == 'DOS':\n cls._scan_matrix (out, fid, 'DOS_op')\n elif line == 'n':\n cls._scan_matrix (out, fid, 'n_op')\n elif line == 'neq':\n cls._scan_matrix (out, fid, 'neq_op')\n \n return out", "def read_ascii_raster(ascii_raster_file):\n import numpy as np\n\n with open(ascii_raster_file) as f:\n header_data = [float(f.next().split()[1]) for x in xrange(6)] #read the first 6 lines\n\n raster_data = np.genfromtxt(ascii_raster_file, delimiter=' ', skip_header=6)\n raster_data = raster_data.reshape(header_data[1], header_data[0]) #rows, columns\n\n return raster_data, header_data", "def read_data(self):\n temperature_data = RS485.read_temperature(self.data_path)\n humidity_data = RS485.read_humidity(self.data_path)\n moisture_data = RH_010_GN.read_moisture(self.data_path)\n o2_data = LB_856.read_o2(self.data_path)\n co2_data = LB_856.read_co2(self.data_path)\n\n self.data = [temperature_data, humidity_data, moisture_data, o2_data, co2_data]", "def read_py_instagram_data_full(self, date=None):\n print 'Reading Data ...'\n if date is None:\n fname = self.wpath + 'Data-py/instagram/' + self.city[2] + '-instg-f-twitter.data.bz2'\n else:\n fname = self.wpath + 'Data-py/instagram/' + self.city[2] + '-instg-f-twitter-' + date + '.data'\n self.dataset = loadtxt(fname, skiprows=0,\n dtype=[('twid', 'S25'), ('lat', 'f8'), ('lng', 'f8'), ('igurl', 'S100'), ('igid', 'S20'),\n ('iguname', 'S20')],\n usecols=(0, 1, 2, 5, 6, 7), delimiter=';', comments='*')", "def read_file(self,file_name):\r\n data = np.genfromtxt(file_name)\r\n return data;", "def read_file(self,fname):\n try:\n self.raw=spiketrain.read_file(fname)\n except Exception:\n self.raw=None\n raise", "def read_cycle_info(filename):\r\n \r\n # Open file and read it into a list of lines.\r\n fin = open(filename, \"r\")\r\n lines = fin.readlines()\r\n fin.close()\r\n \r\n info = [[]] * 256;\r\n\r\n for line in lines:\r\n fields = line.split(',')\r\n opc = int(fields[0],16)\r\n info[opc] = (int(fields[1]), int(fields[2]), int(fields[3]))\r\n return info", "def parse_davisweather(hex_str, port=None):\n\n b = bytes.fromhex(hex_str)\n val = struct.unpack('<BbHhBxhBBHBHBHHHHHHBHB', b) # Capital is unsigned, b 8bit h 16bit, x 8bit padding\n data = {\n # 0 int DavisDataCode 07\n # 1 data version 0\n # 2 uint16_t Current barometer as (Hg / 1000)\n # 3 int16_t Inside Temperature as (DegF / 10)\n # 4 uint8_t Inside Humidity as percentage\n # 5 int16_t Outside Temperature as (DegF / 10)\n # 6 uint8_t Wind Speed\n # 7 uint8_t 10-Minute Average Wind Speed\n # 8 uint16_t Wind Direction in degress\n # 9 uint8_t Outside Humidity\n # 10 uint16_t Rain Rate\n # 11 uint8_t UV Level\n # 12 uint16_t Solar Radiation\n # 13 uint16_t Total Storm Rain\n # 14 uint16_t Start date of current storm\n # 15 uint16_t Rain Today\n # 16 uint16_t Rain this Month\n # 17 uint16_t Rain this Year\n # 18 uint8_t Transmitter battery status\n # 19 uint16_t Console Battery Level:\n # 20 uint8_t Forecast Icon\n # 21 uint8_t Forecast rule number\n 'barometer': round(((val[2] / 1000) * 33.86389), 1),\n 'in_temperature': round((((val[3] / 10) - 32) / 1.8), 1),\n 'in_humity': val[4],\n 'out_temperature': round((((val[5] / 10) - 32) / 1.8), 1),\n 'windspeed': val[6],\n '10minwind': val[7],\n 'winddirection': val[8],\n 'out_humity': val[9],\n 'rain': val[10],\n 'raintoday': val[15],\n }\n return data", "def read_FIREX_ICT_file(path, FileName):\n # Setup a manual file reader for the ICT files.\n file2use = '{}/{}'.format(path, FileName)\n # Local 
variables\n HeaderLineStarts = 'Time_Start, Time_Stop, Day_Of_Year_YANG, Latitude_YANG'\n Year = 2019\n FirstDayOfYear = datetime.datetime(Year, 1, 1)\n DOYvar = 'Day_Of_Year_YANG'\n StartVar = 'Time_Start'\n # Extract file by reading line by line\n with open( file2use, 'r') as OpenedFile:\n\n # Read data after the head line has been read\n ReadDataHereOnwards = False\n data = []\n for line in OpenedFile:\n line = line.strip()\n # Extract data after header\n if ReadDataHereOnwards:\n data += [line.split(',')]\n # skip lines until header for data found\n if line.startswith(HeaderLineStarts):\n header = line.split(',')\n header = [i.strip() for i in header]\n ReadDataHereOnwards = True\n\n # Compile data and header into a pd.DataFrame\n df = pd.DataFrame(data, columns=header)\n # convert columns to floats where possible\n for col in df.columns:\n df.loc[:, col] = pd.to_numeric(df[col])\n\n # Update the index to be in datetime\n dates = []\n days = df[DOYvar].values\n for idx in df.index:\n day = df.loc[idx, DOYvar]\n seconds = df.loc[idx, StartVar]\n date = FirstDayOfYear + datetime.timedelta(int(day) - 1.0)\n date = AC.add_secs(date, seconds)\n dates += [date]\n df.index = dates\n return df", "def read_xtidefile(fid):\n l = fgetl_nocom(fid)\n ncon = sscanf(l, '\\n %d')\n xtide = type('struct', (), {})()\n for k in range(1, (ncon +1)):\n l = fgetl_nocom(fid)\n xtide.name(k, :) = l[0:8]\n xtide.speed(k) = sscanf(l[8:l.shape[0]], '\\n %f')\n xtide.startyear = sscanf(fgetl_nocom(fid), '\\n %d')\n nyear = sscanf(fgetl_nocom(fid), '\\n %d')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n xtide.equilibarg(k, :) = fscanf(fid, '\\n %f', nyear)\n l = fgetl(fid)\n l = fgetl(fid)\n # Skip *END*\n nyear = sscanf(fgetl_nocom(fid), '\\n %d')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n xtide.nodefactor(k, :) = fscanf(fid, '\\n %f', nyear)\n l = fgetl(fid)\n l = fgetl(fid)\n # Skip *END*\n # Now read in all harmonic data\n #nsta=1754; \n # This is number of stations in harmonics (1998-07-18)\n #nsta=3351; \n # This is number of stations in v1.42 or harmonics file\n nsta = 3316\n # This is number in v1.51\n xharm = type('struct', (), {})()\n nh = 0\n while max(l.shape) > 0 & l[0] != - 1:\n\n l = l + ' '\n nh = nh + 1\n while not l[0:3] == '# !':\n\n l = fgetl(fid) + ' '\n\n while l[0:3] == '# !':\n\n if 'unit' == l[((3:7 -1) -1)]:\n tmp = deblank(l[(findstr(l, ':') + 2 -1):l.shape[0]])\n xharm.units(nh, range(1, (max(tmp.shape) +1))) = tmp\n else:\n if 'long' == l[((3:7 -1) -1)]:\n xharm.longitude(nh) = sscanf(l[(findstr(l, ':') + 1 -1):l.shape[0]], '\\n %f')\n else:\n if 'lati' == l[((3:7 -1) -1)]:\n xharm.latitude(nh) = sscanf(l[(findstr(l, ':') + 1 -1):l.shape[0]], '\\n %f')\n l = fgetl(fid)\n\n tmp = deblank(l)\n if tmp[0] != '#':\n # Not commented out\n xharm.station(nh, range(1, (max(tmp.shape) +1))) = tmp\n tmp = fgetl(fid)\n k = np.min(findstr(tmp, ':'))\n tim = sscanf(tmp[0:k - 1], '\\n %d') + sscanf(tmp[(k + np.array([range(1, 3)]).reshape(1, -1) -1)], '\\n %d') / 60\n xharm.timezone(nh) = tim\n tmp = fgetl(fid)\n xharm.datum(nh) = sscanf(tmp, '\\n %f')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n if l[0] != 'x':\n ll = np.min(np.array([findstr(' ', l), np.flatnonzero(abs(l) == 9)]).reshape(1, -1))\n # space or tab\n tmp = sscanf(l[(ll + 1 -1):l.shape[0]], '\\n %f', 2)\n xharm.A(nh, k) = tmp[0]\n xharm.kappa(nh, k) = tmp[1]\n l = fgetl(fid)\n else:\n nh = nh - 1\n if rem(nh, 50) == 0:\n fprintf('.')\n\n fprintf('\\\\n')\n # Convert internally to sparse matrix storage (much 
smaller).\n xharm.A = sparse(xharm.A)\n xharm.kappa = sparse(xharm.kappa)\n return xtide, xharm\n # \n return xtide, xharm", "def read(self, filename):\n raise NotImplementedError", "def read_text(filename):\n with open(filename, 'r') as f:\n com = f.readline()[0]\n wavelength, flux = np.loadtxt(filename, unpack=True,\n usecols=(0, 1), comments=com)\n return wavelength, flux", "def __readData(self, f, nRows, nCols):\n # Efficiently allocate all the memory we'll need.\n data = numpy.empty( (nCols, nRows), float )\n\n # Import data from the LFM Solar Wind file\n rowIndex = 0\n for row in f.readlines():\n if len(row.split()) != nCols: continue\n\n for col, field in enumerate(row.split()):\n data[col, rowIndex] = field\n\n rowIndex += 1\n\n # Bad things can happen if the file header says there is more\n # (or less) data than there actually is within the file!\n assert(rowIndex == nRows)\n\n return data", "def read_zp(file):\n with open(file) as f_in:\n head = f_in.readline()\n units = f_in.readline()\n for line in f_in:\n try:\n zpWave[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[1])\n zpF0[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[2])\n \n except NameError:\n zpWave = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[1])}\n zpF0 = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[2])}\n \n return zpWave, zpF0", "def read_cleaned(file):\n wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref = [],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n lamFlam.append(float(line.strip().split(' ')[2]))\n elamFlam.append(line.strip().split(' ')[3])\n flamFlam.append(line.strip().split(' ')[4])\n beam.append(line.strip().split(' ')[5])\n odate.append(line.strip().split(' ')[6])\n ref.append(line.strip().split(' ')[7])\n \n return wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref", "def read_input():\n orbitDict = {}\n with open('day06_input.txt') as f:\n for line in f:\n planet, satellite = line.split(')')\n satellite = satellite.rstrip('\\n')\n\n if satellite in orbitDict:\n orbitDict[satellite].append(planet)\n else:\n orbitDict[satellite] = [planet]\n\n return orbitDict", "def read_file(filename):\n # Read in as nested dictionary\n # hipparcos_data = {'(star catalog number':\n # { 'parallax' : ... , 'apparent_magnitude' : ... , 'blue_minus_visual' : ... },\n # ... 
}\n\n return hipparcos_data", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def read(self):", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def uadb_ascii_to_dataframe(file=''): \n \n if debug:\n print(\"Running uadb_ascii_to_dataframe for: \", file) \n \n data = check_read_file(file=file, read=True) # TODO\n \n #source_file = [l for l in file.split('/') if '.txt' in l][0]\n\n nmiss = 0\n search_h = False \n read_data = []\n \n usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n\n #usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd, iday, ident, numlev= 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n obs_id = 0\n stations_id = [] \n \n for i, line in enumerate(data):\n if line[0] == 'H':\n try:\n # Header\n usi = int(line[2:14]) # unique station identifier\n \n ident = int(line[15:21].replace(' ',''))# WMO\n if ident not in stations_id:\n stations_id.append(ident)\n \n #if len(ident) == 4:\n # ident = '0' + ident \n #idflag = int(line[22:24]) # id flag\n #d_src = int(line[25:28]) # source dataset\n #version = float(line[29:34]) # version\n #dateflag = int(line[35:37]) # date flag\n year = line[38:42] # year\n month = \"%02d\" % int(line[43:45])\n day = \"%02d\" % int(line[46:48])\n hour = line[49:53]\n #locflag = int(line[54:56]) # Location Flag\n lat = float(line[57:67])\n lon = float(line[68:78])\n #ele = float(line[79:85])\n #stype = int(line[86:88])\n numlev = int(line[89:93])\n #pvers = line[94:102]\n\n if '99' in hour:\n hour = hour.replace('99', '00')\n \n if '99' in day:\n search_h = True\n continue\n \n minutes = int(hour) % 100 \n hour = \"%02d\" % (int(hour) // 100)\n if minutes > 60 or minutes < 0:\n minutes = 0\n minutes = \"%02d\" % minutes\n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day)\n #pday = int(day)\n search_h = False\n\n except Exception as e:\n #print(\"Error: \", i, line, repr(e), \"Skipping Block:\")\n search_h = True\n #iprev = i\n\n elif search_h:\n nmiss += 1\n continue # Skipping block\n\n else:\n # Data\n #ltyp = int(line[0:4])\n p = float(line[5:13])\n \n if p != -99999.0 and p != 9999.9: \n press = float(line[5:13])*100 # converting to Pa, since P is given in mb (1 mb = 1 hPa) \n else:\n press = np.nan \n \n gph = float(line[14:22]) # gph [m]\n \n if gph == -999.0 or gph == -99999.00 or gph >= 99999.0:\n gph = np.nan\n \n temp = float(line[23:29])\n if temp == -999.0:\n temp = np.nan \n else:\n temp = temp + 273.15\n \n rh = float(line[30:36]) # %\n if rh == -999.0:\n rh = np.nan\n else:\n rh = rh / 100. 
# convert to absolute ratio TODO\n\n wdir = float(line[37:43]) \n if wdir == -999.0 or wdir == -999 :\n wdir = np.nan\n \n wspd = float(line[44:50]) # [m/s], module of the velocity\n if wspd <0 :\n wspd = np.nan \n \n try:\n \n for value,var in zip([ gph, temp, wspd, wdir, rh], [ 'gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity'] ):\n obs_id = obs_id +1\n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n else:\n z_type = -2147483648 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n \n except:\n 0\n \n \n \n #column_names = ['source_file', 'product_code', 'report_id', 'observation_id', 'report_timestamp' , 'iday', 'station_id', 'lat@hdr', 'lon@hdr', 'vertco_reference_1@body', 'obsvalue@body', 'varno@body' , 'units', 'number_of_pressure_levels' ]\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n #df['observations_id'] =numpy.char.zfill(numpy.arange(ivar.shape[0]).astype('S10'), 10)\n \n df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n #df['report_id'] = numpy.int64 (df['report_id'] ) \n #df['observation_id'] = numpy.int64 (df['observation_id'] ) \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n print('Done reading DF')\n return df , stations_id", "def _setupWeather(self, w, config):\n wnames = ('cloud', 'seeing')\n if w not in wnames:\n raise Exception('w should be one of %s' %(wnames))\n filename = config['%s_datafile' %(w)]\n file = open(filename, 'r')\n # Also assume flat file contains only date / value in a space or tab separated file. \n self.dates[w] = []\n self.weather[w] = []\n # Read the data file.\n print '# Reading weather data file %s' %(filename)\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n self.dates[w].append(line.split()[0])\n self.weather[w].append(line.split()[1])\n file.close()\n self.dates[w] = numpy.array(self.dates[w], float)\n self.weather[w] = numpy.array(self.weather[w], float)\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d weather values from %s file. ' %(len(self.weather[w]), filename)\n # Check that weather data is monotonically increasing in time. 
\n if not(numpy.all(numpy.diff(self.dates[w]))):\n order = self.dates[w].argsort()\n self.weather[w] = self.weather[w][order]\n self.dates[w] = self.dates[w][order]\n # Get the total length of time included in this (seeing/cloud) file,\n # so that we can determine a wrap-around date if we need that.\n self.maxtime[w] = self.dates[w].max()\n return", "def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')", "def readFastaFile(filename):", "def read_and_parse():\n\t# read\n\tline = D.gps_serial.readline()\n\n\t# break into components\n\tdata = line.split(\",\")\n\t#print data\n\t# identify and parse. Indicies are from datasheet \n\tif(data[0] == \"$GPGGA\"):\n\t\tgps_msg = RosGPS()\n\t\tif (data[1] != \"\"):\n\t\t\tgps_msg.gps_time = float(data[1])\n\t\tif (data[2] != \"\"):\n\t\t\tgps_msg.latitude = float(data[2])\n\t\tif (data[4] != \"\"):\n\t\t\tgps_msg.longitude = float(data[4])\n\t\tif (data[9] != \"\"):\n\t\t\tgps_msg.altitude = float(data[9])\n\t\tif (data[7] != \"\"):\n\t\t\tgps_msg.NSatellites = int(data[7])\n\t\t\n\t\tD.gpsPub.publish(gps_msg)", "def read_data(filename):\n \n ######################################################\n # Disadvantage here: only includes J_up = 11 here, #\n # please manually add more if you have #\n # J_up >= 12 CO lines #\n ######################################################\n \n ascii_data = ascii.read(\n filename, names=[\n \"SOURCE\", \"z\", \"D_L\", \"line_width\",\n \"CO_J_1\", \"eCO_J_1\", \"CO_J_2\", \"eCO_J_2\", \"CO_J_3\", \"eCO_J_3\",\n \"CO_J_4\", \"eCO_J_4\", \"CO_J_5\", \"eCO_J_5\", \"CO_J_6\", \"eCO_J_6\",\n \"CO_J_7\", \"eCO_J_7\", \"CO_J_8\", \"eCO_J_8\", \"CO_J_9\", \"eCO_J_9\",\n \"CO_J_10\", \"eCO_J_10\", \"CO_J_11\", \"eCO_J_11\", \"CI_1\", \"eCI_1\",\n \"CI_2\", \"eCI_2\"])\n\n pd = ascii_data.to_pandas()\n pd = pd.set_index('SOURCE')\n return pd.T", "def open_igra_metadata(filename):\n import pandas as pd\n infos = \"\"\"\n IGRAID 1- 11 Character\n WMOID 13- 17 Integer\n NAME 19- 48 Character\n NAMFLAG 50- 50 Character\n LATITUDE 52- 60 Real\n LATFLAG 62- 62 Character\n LONGITUDE 64- 72 Real\n LONFLAG 74- 74 Character\n ELEVATION 76- 81 Real\n ELVFLAG 83- 83 Character\n YEAR 85- 88 Integer\n MONTH 90- 91 Integer\n DAY 93- 94 Integer\n HOUR 96- 97 Integer\n DATEIND 99- 99 Integer\n EVENT 101-119 Character\n ALTIND 121-122 Character\n BEFINFO 124-163 Character\n BEFFLAG 164-164 Character\n LINK 166-167 Character\n AFTINFO 169-208 Character\n AFTFLAG 209-209 Character\n REFERENCE 211-235 Character\n COMMENT 236-315 Character\n UPDCOM 316-346 Character\n UPDDATE 348-354 Character\n \"\"\"\n import numpy as np\n colspecs = []\n header = []\n types = {}\n for 
iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n elif it == 'Real':\n it = 'float'\n else:\n it = 'int'\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data", "def read_wx_data(wx_file, harbor_data):\n wx_data = pd.read_csv(wx_file) # a dataframe that holds the data from \"TempPressure.txt\"\n \n temp = list(wx_data[\"Time\"]) # a list of strings\n # Convert string time to float hours for easier plotting\n init_time = temp[0] # take first time which will be your time zero\n harbor_data[\"wx_times\"] = [] # list to hold the data\n for h_time in temp:\n delta_t = dt.strptime(h_time, '%H:%M:%S') - dt.strptime(init_time, '%H:%M:%S') # get delta time\n harbor_data[\"wx_times\"].append(float(delta_t.total_seconds()/3600)) # convert to hours\n\n harbor_data[\"wx_temperatures\"] = wx_data[\"Ch1:Deg F\"] # Places temperatures in harbor_data", "def _read_antti_location(location_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if location_file.split('.')[-1] == 'gz':\n ff = gzip.open(location_file, 'r')\n else:\n ff = open(location_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n # read LatLon array (with optional labels...\n # either all have labels, or none, else genfromtxt() chokes)\n lll = list(zip(*np.atleast_1d(np.genfromtxt(\n sIO, comments=\"%\", dtype=None,\n names=['latReal','lonReal','radReal','labelString']\n ))))\n\n # handles older style(s) with no radius and/or labels\n if len(lll) > 3:\n lat, lon, rad = np.array(lll[0:3])\n label = np.array(lll[3])\n elif len(lll) > 2:\n lat, lon, rad = np.array(lll[0:3])\n if isinstance(rad[0], (str, bytes)):\n label = rad\n rad = np.ones(lat.shape)\n else:\n label = np.tile('', lat.shape)\n elif len(lll) == 2:\n lat, lon = np.array(lll[0:2])\n rad = np.ones(lat.shape)\n label = np.tile('', lat.shape)\n else:\n raise Exception('Requires (at least) latitude and longitude')\n\n return lat, lon, rad, label", "def _get_data(self):\n with open(self.filename, 'r') as fid:\n # we are not interested in the first line\n fid.readline()\n # second line\n line = fid.readline().strip()\n # the temperature is written in milli-degrees in the form\n # t=23456, but preceeded by a large HEX data dump in the form\n # 2c 00 4b 46 ff ff 0e 10 17 t=21875\n index = line.find('t=') + 2\n temperature = int(line[index:index + 6]) / 1e3\n time_now = self.get_timestamp()\n\n logging.debug(\n 'w1_temp: {0}, datetime: {1}, logger_id: {2}'.format(\n temperature,\n time_now,\n self.logger_id))\n\n ins = self.table(value=temperature,\n logger_id=self.logger_id,\n datetime=time_now)\n\n self.session.add(ins)\n self.session.commit()", "def weather_of_wind(city):\n pattern = re.compile(r'.*(\\d+).*')\n\n time_index = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index = dict(time_index.tolist())\n numpy_res = 
np.empty((len(time_index['index']),))\n with open(exp_data_path + os.sep + 'weather' + os.sep + city + os.sep + '{}_wind.csv'.format(city)) as f:\n reader = csv.reader(f)\n for line in reader:\n if '微' in line[1]:\n line[1] = 0\n else:\n line[1] = pattern.match(line[1]).group(1)\n numpy_res[int(line[0])] = int(line[1])\n\n file_name = exp_data_path + os.sep + 'weather' + os.sep + city + os.sep + '{}_wind'.format(city)\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, numpy_res)\n pass", "def read_infile(infile):\n # There are a variable header lengths possible.\n # Loop through and look for when the line starts\n # with '1', the first index.\n nheader = 0\n try:\n with open(infile, 'r') as f:\n for line in f:\n if line.strip().startswith('1'):\n break\n nheader += 1\n except IOError:\n message = f'Unable to open {infile} in modconvert.'\n raise PipeCalError(message)\n index, freq, tbr, flux, trj = np.genfromtxt(infile, unpack=True,\n skip_header=nheader)\n return index, freq, tbr, flux, trj", "def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set" ]
[ "0.6585251", "0.65576595", "0.6403347", "0.60498667", "0.59925395", "0.59534574", "0.59316444", "0.58384484", "0.5829025", "0.5764885", "0.57411146", "0.571867", "0.57075197", "0.56858313", "0.5685079", "0.56707263", "0.56707263", "0.56704044", "0.5669247", "0.5663918", "0.56601894", "0.56368417", "0.56338936", "0.56320095", "0.5629638", "0.56266713", "0.56266093", "0.5626478", "0.56122434", "0.5606969", "0.5604641", "0.5596437", "0.5588243", "0.5573247", "0.5555221", "0.5554076", "0.5535318", "0.55279326", "0.55172634", "0.5516465", "0.55014586", "0.54795307", "0.54770094", "0.54762495", "0.5467555", "0.5460275", "0.54554725", "0.5452627", "0.5452239", "0.5443032", "0.5435057", "0.542541", "0.54219663", "0.54208875", "0.54199326", "0.5417392", "0.54141486", "0.5409067", "0.5405345", "0.5402951", "0.53983736", "0.53980815", "0.53914046", "0.53913283", "0.53873825", "0.5379223", "0.5364545", "0.5360919", "0.5359491", "0.53445923", "0.5344555", "0.5340683", "0.5337553", "0.5337459", "0.5336658", "0.5334239", "0.53256494", "0.5318822", "0.53146726", "0.5312951", "0.5307273", "0.5305289", "0.53020406", "0.5300052", "0.52981573", "0.5291307", "0.52843344", "0.52823514", "0.52790916", "0.5277442", "0.5276713", "0.5275402", "0.5269164", "0.52606297", "0.52578723", "0.5255951", "0.5243486", "0.52431864", "0.52430576", "0.523705" ]
0.67925274
0
Get features (for regression) based on this bikedata's weather data
def get_weather_features(self):
    if self.weather_features is None:
        raise Exception("Weather features not made yet.")
        ### self.make_weather_features()
    else:
        return self.weather_features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"] * 60 + self.df_poly[\"minute\"]\n\n # Empty list to hold the feature names\n poly_feature_names = []\n\n # Add the poly columns to the df_poly\n for degree in [0, 1, 2, 3, 4, 5]:\n self.df_poly = poly(self.df_poly, degree)\n poly_feature_names.append(\"poly_\" + str(degree))\n\n # filterout + - inf, nan\n self.df_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ]\n\n # Save the poly feature name\n self.poly_feature_names = poly_feature_names\n feature_names = []\n\n #########################################################################################\n train_index_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ].index\n X_train_poly, y_train_poly = (\n self.df_poly[self.poly_feature_names].loc[train_index_poly],\n self.df_poly[\"y\"].loc[train_index_poly],\n )\n\n # Build the Polynomial Regression Model\n lin_reg = LinearRegression()\n lin_reg.fit(X_train_poly, y_train_poly)\n self.poly_model = lin_reg\n y_train_season = lin_reg.predict(X_train_poly)\n self.y_train_season_obj = y_train_season\n #########################################################################################\n\n for n in [10, 15, 20, 25, 30]:\n self.df = MOM(self.df, n)\n feature_names.append(\"MOM_\" + str(n))\n for n in [10, 15, 20, 25, 30]:\n self.df = ROC(self.df, n)\n feature_names.append(\"ROC_\" + str(n))\n for n in [1, 2, 3, 4, 5]:\n self.df = LAG(self.df, n)\n feature_names.append(\"LAG_\" + str(n))\n for n in [10, 20, 30]:\n self.df = MA(self.df, n)\n feature_names.append(\"MA_\" + str(n))\n\n self.df = self.df[\n ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)\n ] # filterout + - inf, nan\n self.feature_names = feature_names", "def extractFeatures(self, datum):\n abstract", "def generate_features(self):\n bars = self.portfolio.data_handler.bars.ix[:, -15:, :]\n prices = bars[\"adj_price_close\"]\n weights = np.array([1.0, -1.])\n feats = pd.DataFrame(index=bars.minor_axis)\n ts = prices.dot(weights)\n feats[\"z-score\"] = (ts.ix[-1] - ts.mean()) / ts.std()\n return feats", "def get_all_station_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n num = demand_data[:, 0, -2, np.newaxis] # todo check meaning here, get quick and slow feature\n\n raw_data = np.concatenate((num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, 
columns=GENERAL_HEADER)\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def load_data():\n data = pd.read_csv('datasets/housing.csv')\n prices = data['MEDV']\n features = data.drop(['MEDV'], axis=1) # remove it from data as we need to predict it\n print(data.head()) # prints top columns 5 for ex\n return [features, prices]", "def _extract_features(self, row):\n ncep_data = self.ncep_data\n ncep_sfc_data = self.ncep_sfc_data\n date = row['date']\n features = dict(row)\n #reduce the dimensions of ncep_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_data = ncep_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['level','time'])\n #reduce the dimensions of ncep_sfc_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_sfc_data = ncep_sfc_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['time'])\n\n for level in self.levels:\n #features at different pressure level\n point = ncep_data.loc[level]\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_data_vars:\n features[\"{0}_0w_lvl_{1}\".format(data_var,level)] = v0w[data_var]\n features[\"{0}_1w_lvl_{1}\".format(data_var,level)] = v1w[data_var]\n features[\"{0}_2w_lvl_{1}\".format(data_var,level)] = v2w[data_var]\n features[\"{0}_3w_lvl_{1}\".format(data_var,level)] = v3w[data_var]\n #features at surface level\n point = ncep_sfc_data\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_sfc_data_vars:\n features[\"{0}_0w\".format(data_var)] = v0w[data_var]\n features[\"{0}_1w\".format(data_var)] = v1w[data_var]\n features[\"{0}_2w\".format(data_var)] = v2w[data_var]\n features[\"{0}_3w\".format(data_var)] = v3w[data_var] \n\n return features", "def get_features(self):\n return []", "def get_all_features(self) :\n raise NotImplementedError", "def forecast_weather(self):\n pass", "def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x", "def make_weather_features(self, timeline_dt_list):\n\n print \"Making weather features...\"\n\n N_FEATURES = 2\n n_examples = len(timeline_dt_list)\n XX = numpy.zeros((n_examples, N_FEATURES))\n indices = numpy.zeros(n_examples,dtype='int')\n 
ind_weatherday = 0\n\n # Loop over all times in the timeline\n for ii, time in enumerate(timeline_dt_list):\n # Find where this time in the timeline matches the date\n # of some weather data.\n jj = ind_weatherday\n while time.date() != self.datetimes[jj].date():\n # Make sure jj does not get too large to be an index to\n # the list.\n # Note this is probably a bad idea to do it this way.\n if jj == len(self.datetimes)-1:\n break\n jj += 1\n## print jj\n\n ind_weatherday = jj\n indices[ii] = ind_weatherday\n\n# XX[ii, 0] = self.table['PrecipIn'][ind_weatherday]\n# XX[ii, 1] = self.table['Mean TemperatureF'][ind_weatherday]\n## XX[ii, 2] = self.table['MeanDew PointF'][ind_weatherday]\n\n XX[:,0] = self.table['PrecipIn'][indices]\n XX[:,1] = self.table['Mean TemperatureF'][indices]\n self.weather_features = XX\n return XX", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n 
\"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def select(self, features):\n if 'Weather Type' not in features:\n features.append('Weather Type')\n self.data = self.data[:,[self._getFIdx(f) for f in features]]\n self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]\n return 0", "def extract_features(time_series, window):\n if not tsd_common.is_standard_time_series(time_series, window):\n # add your report of this error here...\n\n return []\n\n # spilt time_series\n split_time_series = tsd_common.split_time_series(time_series, window)\n # nomalize time_series\n normalized_split_time_series = tsd_common.normalize_time_series(split_time_series)\n max_min_normalized_time_series = tsd_common.normalize_time_series_by_max_min(split_time_series)\n s_features = statistical_features.get_statistical_features(normalized_split_time_series[4])\n f_features = fitting_features.get_fitting_features(normalized_split_time_series)\n c_features = classification_features.get_classification_features(max_min_normalized_time_series)\n # combine features with types\n features = s_features + f_features + c_features\n return features", "def get_dataset_features(text):\n return model.extract(text)", "def get_model_feature(\n model,\n batch_x\n):\n features = model.get_feature(batch_x, training=False)\n return features", "def extract_features(self, inputs):\n pass", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def _get_feature_map(self, time_period=False, volume_filter=False):\n if not self.stocks:\n return False\n\n # Load the data from the stock dictionary\n features = []\n symbol_names = []\n historical_price_info = []\n\n if not time_period:\n today = datetime.datetime.now()\n previous = today - datetime.timedelta(days=60)\n time_period = [previous, today]\n\n for stock in self.stocks:\n price_data = self.db.get_stock_prices(\n stock, time_period=time_period, dataframe=True)\n\n if type(price_data) == bool and not price_data:\n continue\n if len(price_data) < 5:\n continue\n\n volatility = self.stock_engine.volatility(\n price_data, dataframe=True)\n\n if volatility[0] < self.VOLATILITY_FILTER:\n continue\n\n stock_feature_dict = self.stock_engine.get_technical_indicators(\n price_data)\n\n if not stock_feature_dict:\n continue\n\n feature_list = []\n for key in list(sorted(stock_feature_dict.keys())):\n feature_list.extend(stock_feature_dict[key])\n\n if np.isnan(feature_list).any() == True:\n\n continue\n\n avg_volume = np.mean(list(price_data['volume'])[-30:])\n\n if volume_filter and avg_volume < volume_filter:\n continue\n\n features.append(feature_list)\n symbol_names.append(stock)\n historical_price_info.append(price_data)\n features, historical, symbols = self._preproc_data(\n features, historical_price_info, symbol_names)\n\n return features, historical, symbols", "def 
get_features(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n # Prepare the output\n self.response = self.model.features_df\n self.response[\"sort_order\"] = pd.Series([i+1 for i in range(len(self.response.index))], index=self.response.index)\n self.response = self.response[[\"model_name\", \"sort_order\", \"name\", \"variable_type\", \"data_type\",\\\n \"feature_strategy\", \"strategy_args\"]]\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"features\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def get_features(self):\n return self._features", "def _extract_data(self) -> np.ndarray:\n \n mats = Material.objects.all()\n \n mat_arrays = []\n for mat in mats: # django queryset -> python list\n mat_features = []\n \n # Add data\n # Some data are missing here.\n #TODO: Delete those if sentences after cleaning the data.\n mat_features.append(mat.model_surface_temperature if mat.model_surface_temperature!=None else 0)\n mat_features.append(mat.melt_temperature if mat.melt_temperature!=None else 0)\n mat_features.append(mat.mold_temperature_range_min if mat.mold_temperature_range_min!=None else 0)\n mat_features.append(mat.mold_temperature_range_max if mat.mold_temperature_range_max!=None else 0)\n mat_features.append(mat.melt_temperature_range_min if mat.melt_temperature_range_min!=None else 0)\n mat_features.append(mat.melt_temperature_range_max if mat.melt_temperature_range_max!=None else 0)\n mat_features.append(mat.absolute_maximum_melt_temperature if mat.absolute_maximum_melt_temperature!=None else 0)\n mat_features.append(mat.ejection_temperature if mat.ejection_temperature!=None else 0)\n mat_features.append(mat.maximum_shear_stress if mat.maximum_shear_stress!=None else 0)\n mat_features.append(mat.maximum_shear_rate if mat.maximum_shear_rate!=None else 0)\n mat_features.append(mat.melt_density if mat.melt_density!=None else 0)\n mat_features.append(mat.solid_density if mat.solid_density!=None else 0)\n mat_features.append(mat.pvt_b5 if mat.pvt_b5!=None else 0)\n mat_features.append(mat.pvt_b6 if mat.pvt_b6!=None else 0)\n mat_features.append(mat.pvt_b1m if mat.pvt_b1m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b4m if mat.pvt_b4m!=None else 0)\n mat_features.append(mat.pvt_b1s if mat.pvt_b1s!=None else 0)\n mat_features.append(mat.pvt_b2s if mat.pvt_b2s!=None else 0)\n mat_features.append(mat.pvt_b3s if mat.pvt_b3s!=None else 0)\n mat_features.append(mat.pvt_b4s if mat.pvt_b4s!=None else 0)\n mat_features.append(mat.pvt_b7 if mat.pvt_b7!=None else 0)\n mat_features.append(mat.pvt_b8 if mat.pvt_b8!=None else 0)\n mat_features.append(mat.pvt_b9 if mat.pvt_b9!=None else 0)\n mat_features.append(mat.elastic_modulus_e1 if mat.elastic_modulus_e1!=None else 0)\n mat_features.append(mat.elastic_modulus_e2 if mat.elastic_modulus_e2!=None else 0)\n mat_features.append(mat.poisson_ratio_v12 if mat.poisson_ratio_v12!=None else 0)\n mat_features.append(mat.poisson_ratio_v23 if mat.poisson_ratio_v23!=None else 0)\n mat_features.append(mat.shear_modulus_g12 if mat.shear_modulus_g12!=None else 0.)\n 
mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1!=None else 0.)\n mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2!=None else 0.)\n mat_features.append(mat.seven_params_n if mat.seven_params_n!=None else 0.)\n mat_features.append(mat.seven_params_Tau if mat.seven_params_Tau!=None else 0.)\n mat_features.append(mat.seven_params_D1 if mat.seven_params_D1!=None else 0.)\n mat_features.append(mat.seven_params_D2 if mat.seven_params_D2!=None else 0.)\n mat_features.append(mat.seven_params_D3 if mat.seven_params_D3!=None else 0.)\n mat_features.append(mat.seven_params_A1 if mat.seven_params_A1!=None else 0.)\n mat_features.append(mat.seven_params_A2 if mat.seven_params_A2!=None else 0.)\n mat_features.append(mat.c1 if mat.c1!=None else 0.)\n mat_features.append(mat.c2 if mat.c2!=None else 0.)\n mat_features.append(mat.conversion_temperature if mat.conversion_temperature!=None else 0.)\n mat_features.append(mat.MFR_temperature if mat.MFR_temperature!=None else 0.)\n mat_features.append(mat.MFR_loading if mat.MFR_loading!=None else 0.)\n mat_features.append(mat.measured_MFR if mat.measured_MFR!=None else 0.)\n \n mat_arrays.append(mat_features)\n \n # Get numpy arrays.\n mat_arrays = np.array(mat_arrays, dtype=np.float64)\n \n return mat_arrays", "def parse_weather(data: DataFrame) -> List[WeatherData]:\n parsed_results = []\n\n for index, row in data.iterrows():\n date = sqlite3.Date(index.year, index.month, index.day)\n item = WeatherData(\n date=date,\n average_temp=celsius_to_fahr(row.get('tavg', 0)),\n precipitation=row.get('prcp', 0),\n )\n parsed_results.append(item)\n return parsed_results", "def weather_data(cities, openweathermap_api_key=openweathermap_api_key):\n L = []\n for c in cities:\n res = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={c}&appid={openweathermap_api_key}&units=imperial')\n L.append(res.json())\n\n df = pd.DataFrame(L)\n df['lon'] = df['coord'].map(op.itemgetter('lon'))\n df['lat'] = df['coord'].map(op.itemgetter('lat'))\n df['Temprature'] = df['main'].map(op.itemgetter('temp'))\n df['Humidity'] = df['main'].map(op.itemgetter('humidity'))\n df['Wind Speed'] = df['wind'].map(op.itemgetter('speed'))\n return df[['name','lon', 'lat','Temprature','Humidity','Wind Speed']]", "def features(self) -> List[np.ndarray]:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n return [d.features for d in self.data]", "def get_features(data, col_list, y_name):\n \n # keep track of numpy values\n feature_matrix = data[col_list + [y_name]].dropna().values\n return feature_matrix[:, :-1], feature_matrix[:, -1]", "def get_features(self, request, **kwargs):\n raise NotImplementedError()", "def get_features(self, problem_name=None, user_name=None):\n with self.__orm.session_scope() as session:\n results = self._get_features(session, problem_name, user_name).all()\n feature_dicts = []\n for feature, user_name in results:\n d = {\n \"user\" : user_name,\n \"description\" : feature.description,\n \"md5\" : feature.md5,\n \"created_at\" : feature.created_at,\n }\n feature_metrics = session.query(Metric.name,\n Metric.value).filter(Metric.feature_id ==\n feature.id).all()\n # feature_metrics = feature.metrics\n for metric in feature_metrics:\n d[metric.name] = metric.value\n\n feature_dicts.append(d)\n\n if not feature_dicts:\n print(\"No 
features found\")\n else:\n return pd.DataFrame(feature_dicts)", "def featurize(data):\n features = {}\n missing_weight = False\n for fieldname in STATIC_FIELDS:\n # Static fields use -1 to denote that the value was not measured.\n if data[fieldname][0][1] == -1:\n features[fieldname] = NAN_REPLACE\n else:\n features[fieldname] = float(data[fieldname][0][1])\n for fieldname in FIELDS:\n # Time-series fields may or may not be measured, but if they are present\n # in the dataset, then the value will be valid (i.e. nonnegative).\n if fieldname in data:\n values = [float(d[1]) for d in data[fieldname]]\n if -1 in values and fieldname == 'Weight':\n # Record that weight was missing for this record id.\n missing_weight = True\n field_features = set_features_to_nan(fieldname)\n else:\n field_features = {}\n field_features['{}_min'.format(fieldname)] = min(values)\n field_features['{}_max'.format(fieldname)] = max(values)\n field_features['{}_mean'.format(fieldname)] = np.mean(values)\n field_features['{}_first'.format(fieldname)] = values[0]\n field_features['{}_last'.format(fieldname)] = values[-1]\n field_features['{}_diff'.format(fieldname)] = values[-1] - values[0]\n else:\n field_features = set_features_to_nan(fieldname)\n features.update(field_features)\n return features, missing_weight", "def get_feature_importances(self):\n X,y = self.define_dataset(self.df, self.col_list, self.target_var)\n\n # execute search\n search = self.set_Randomized_search(self.model)\n\n X_train, X_test, y_train, y_test= self.holdout(X, y)\n X_train_sc, X_test_sc = self.scale(X_train, X_test)\n res = search.fit(X_train_sc, y_train)\n\n #model = self.set_model(self.model)\n\n\n if (self.model == \"Lasso\") | (self.model == \"Ridge\"):\n\n model = self.set_model(self.model)\n best = model.set_params(**res.best_params_)\n best.fit(X_train_sc,y_train)\n features = best.coef_\n\n else:\n #RandomForest or XGBoost\n model = self.set_model(self.model)\n best = model.set_params(**res.best_params_)\n best.fit(X_train_sc,y_train)\n features = pd.DataFrame(best.feature_importances_,\n index = X_train.columns,\n columns=['importance']).sort_values('importance', ascending=False)\n\n return features", "def generateFeatures(self, data):\n pass", "def generate_features(df):\n return np.array([np.array(xi) for xi in pd.to_datetime(df).apply(lambda x: [x.year, x.month, x.day, x.hour, x.minute, x.second, x.weekday()])])", "def populate_features(self):\n # AssetFeatureValue types\n satellite_feature_value = AssetFeatureValue.Standard.FUND_TYPE_SATELLITE.get_object()\n core_feature_value = AssetFeatureValue.Standard.FUND_TYPE_CORE.get_object()\n\n logger.info('Populating features for ticker %s' % self)\n r_feat = self.get_region_feature_value()\n ac_feat = self.get_asset_class_feature_value()\n curr_feat = self.get_currency_feature_value()\n at_feat = self.get_asset_type_feature_value()\n self.features.clear()\n self.features.add(r_feat, ac_feat, curr_feat, at_feat)\n if self.ethical:\n self.features.add(AssetFeatureValue.Standard.SRI_OTHER.get_object())\n self.features.add(core_feature_value if self.etf else satellite_feature_value)", "def feature_finder(model):\n \n features = model.steps[0][1].get_feature_names()\n feat_values = model[1].coef_\n\n c = {'features' : features}\n feats = pd.DataFrame(data = c)\n feats['values'] = feat_values[0]\n\n sorted_feats = feats.sort_values(by='values')\n return sorted_feats", "def get_features(self, ti=None, tf=None, n_jobs=1, drop_features=[], compute_only_features=[]):\n # initialise 
training interval\n self.drop_features = drop_features\n self.compute_only_features = compute_only_features\n self.n_jobs = n_jobs\n ti = self.ti_model if ti is None else datetimeify(ti)\n tf = self.tf_model if tf is None else datetimeify(tf)\n return self._load_data(ti, tf)", "def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip", "def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n\n return featuresWeights", "def semi_all_static_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n total_num = demand_data[:, 0, -2, np.newaxis]\n slow_num = demand_data[:, 0, 0, np.newaxis]\n fast_num = demand_data[:, 0, 2, np.newaxis]\n\n raw_data = np.concatenate((slow_num, fast_num, total_num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=SEMI_GENERAL_HEADER)\n print(csv_data.shape)\n # print(csv_data.iloc[:, 2])\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'semi_static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def features(self) -> List[np.ndarray]:\n return None", "def __predict_input_fn(self):\n ## Recast spectra into dictionary for estimator\n features = {'flux': self.spectra_test}\n return features", "def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\n return featuresWeights", "def get_features(df, target=[], meta=[]):\n ############################################################\n # Type conversion\n ############################################################\n\n types = df[df.columns[~df.columns.isin(target+meta)]].dtypes\n for col_name, col_type in types.iteritems():\n if col_type == bool:\n df[col_name] = df[col_name].astype(float)\n\n 
############################################################\n # Get features by type\n ############################################################\n \n features_cat = filter(lambda x: not np.issubdtype(x[1], np.number), types.iteritems())\n features_cat = sorted(list(map(lambda x: x[0], features_cat)))\n # target and meta should have already been removed. but just to be sure\n features_num = sorted(list(set(types.index) - set(features_cat) - set(target) - set(meta))) \n selected_features = df.columns.to_list()\n features_idx = dict(zip(selected_features, range(len(selected_features))))\n \n return selected_features, features_num, features_cat, features_idx", "def getFeatureData(self, feature):\n return self.data[:,self._getFIdx(feature)]", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def extractRegressionData(combinedPairRDDWeekdaysRecord):\n commuteFields = combinedPairRDDWeekdaysRecord[1][0]\n weatherFields = combinedPairRDDWeekdaysRecord[1][1]\n \n # Assuming 'Snow on ground' field is index 20, if I counted right\n if str(weatherFields[20]) == '':\n weatherFields[20] = '0'\n \n desiredFieldList = commuteFields[1] + ',' + commuteFields[2] + ',' + weatherFields[20]\n return desiredFieldList", "def select_features(self):\r\n \r\n features_list = list(self.feed_data.columns.values)\r\n features_list.remove(\"min_time\")\r\n thisrace = self.config.race_to_predict\r\n\r\n #if never ran race before, don't include these variables in feature\r\n #selection, they're just 0's anyway\r\n if self.config.first_time_running_race == True:\r\n unuseable_columns = [('min_time', thisrace),('std', thisrace),('num_races', thisrace),\r\n ('rainfall', thisrace),\r\n ('temp', thisrace),\r\n ('wind', thisrace),\r\n ('metersup', thisrace), \r\n 'sex_W']\r\n else:\r\n #drop this column...probs should have removed it earlier. 
\r\n unuseable_columns = ['sex_W']\r\n #print(features_list)\r\n for element in unuseable_columns:\r\n features_list.remove(element)\r\n data_with_all_feats = self.feed_data.drop(unuseable_columns,axis=1)\r\n colstodrop = features_list\r\n thiscols = []\r\n data_with_current_feats = data_with_all_feats.drop(features_list,axis=1)\r\n checkfit=100.0\r\n scores = []\r\n dropped_cols = []\r\n loopgain =True\r\n #mymod = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=10,\r\n # min_samples_split = 25, criterion='mse')\r\n thisloopfeatures_list = features_list\r\n curcols = data_with_current_feats.columns\r\n countgain=0\r\n #print(\"cc\",curcols)\r\n while loopgain == True:\r\n thisloopscore=100.0\r\n for fet in thisloopfeatures_list:\r\n data_with_current_feats[fet] = data_with_all_feats[fet]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=15,\r\n min_samples_split = 12, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n if ((thisloopscore - oobs) > 0.0):\r\n thisloopscore = oobs\r\n fetwinner = fet\r\n data_with_current_feats.drop(fet,axis=1,inplace=True)\r\n etrain.drop(fet,axis=1,inplace=True)\r\n\r\n data_with_current_feats[fetwinner] = data_with_all_feats[fetwinner]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n #print(fetwinner,predscore)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n #print(fetwinner,\"~\",oobs)\r\n thisloopfeatures_list.remove(fetwinner)\r\n if ((checkfit-oobs)>0.0001):\r\n checkfit = oobs\r\n curcols = data_with_current_feats.columns\r\n #print(curcols)\r\n else:\r\n break\r\n\r\n\r\n self.final_df = self.feed_data[data_with_current_feats.columns]\r\n self.Xtrain=self.final_df.sample(frac=0.8,random_state=200)\r\n self.Xtest=self.final_df.drop(self.Xtrain.index)#\r\n self.ytrain = self.Xtrain.pop('min_time')\r\n self.ytest = self.Xtest.pop('min_time')\r\n self.model= RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n self.model.fit(self.Xtrain,self.ytrain)\r\n #print(y)\r\n return", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def get_station_features(cls, station_row):\n features = station_row[2].lower(), station_row[7], station_row[8]\n return features", "def generate_features(self):\n\n # For each STFT timebin, divide data into three bins and get mean power\n data_array = np.array([])\n bl_array = np.array([])\n\n for trial in range(self.data_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.data_stft_norm.shape[-2]): # Each timebin\n for ch in 
range(self.data_stft_norm.shape[0]):\n data_array = np.append(data_array,[\n np.mean(self.data_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 9:27, tbin, trial])])\n\n data_array = np.reshape(data_array, (-1, 18))\n\n for trial in range(self.bl_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.bl_stft_norm.shape[-2]): # Each timebin\n for ch in range(self.bl_stft_norm.shape[0]):\n bl_array = np.append(bl_array, [\n np.mean(self.bl_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 9:27, tbin, trial])])\n bl_array = np.reshape(bl_array, (-1, 18))\n\n X = np.append(data_array, bl_array, axis=0)\n y = np.append(np.ones(data_array.shape[0]), np.zeros(bl_array.shape[0]))\n\n return X, y", "def get_prepared_data(cls, ext_stations=None):\n ext_stations = ext_stations or StationDAO.get_all_with_prices()\n features = (cls.get_station_features(row) for row in ext_stations)\n classes = (cls.get_category(row) for row in ext_stations)\n return features, classes", "def features(self) -> datasets.Features:\n return datasets.Features(\n {\n \"sequence\": datasets.Value(\"string\"),\n \"description\": datasets.Value(\"string\"),\n \"id\": datasets.Value(\"string\"),\n }\n )", "def findFeatures(self):\n\t\tpass", "def feature_eng2(housing_tr, housing):\n logging.info(\"Adding features.....\")\n housing_tr[\"rooms_per_household\"] = (\n housing_tr[\"total_rooms\"] / housing_tr[\"households\"]\n )\n housing_tr[\"bedrooms_per_room\"] = (\n housing_tr[\"total_bedrooms\"] / housing_tr[\"total_rooms\"]\n )\n housing_tr[\"population_per_household\"] = (\n housing_tr[\"population\"] / housing_tr[\"households\"]\n )\n housing_cat = housing[[\"ocean_proximity\"]]\n housing_prepared = housing_tr.join(\n pd.get_dummies(housing_cat, drop_first=True)\n )\n return housing_prepared", "def addFeature(dataset):\n\n dataset[\"H_L\"] = dataset[\"High\"] - dataset[\"Low\"]\n dataset['O_C'] = dataset['Adj Close'] - dataset['Open']\n dataset[\"K_L\"] = (dataset[\"Adj Close\"] - dataset[\"Open\"]) / dataset[\"H_L\"]\n dataset[\"OBV\"] = dataset[\"Volume\"] * \\\n (dataset[\"Adj Close\"] * 2 - dataset[\"H_L\"]) / dataset[\"H_L\"]\n\n dataset['3day MA'] = dataset['Adj Close'].shift(1).rolling(window=3).mean()\n dataset['10day MA'] = dataset['Adj Close'].shift(\n 1).rolling(window=10).mean()\n dataset['30day MA'] = dataset['Adj Close'].shift(\n 1).rolling(window=30).mean()\n dataset['Std_dev'] = dataset['Adj Close'].rolling(5).std()\n\n dataset['RSI'] = talib.RSI(dataset['Adj Close'].values, timeperiod=9)\n dataset['Williams %R'] = talib.WILLR(\n dataset['High'].values, dataset['Low'].values, dataset['Adj Close'].values, 7)\n dataset['Price_Rise'] = np.where(\n dataset['Adj Close'].shift(-1) > dataset['Adj Close'], 1, 0)\n\n dataset = dataset.dropna()\n\n return dataset", "def features(self):\n return self._features", "def extract_features(self) -> DataFrameLike:\n # return already calculated features if stored in state\n if self._final_features:\n return self._finalize_features()\n\n # initialization: generation 0 features are neighborhood features\n features = self.graph.get_neighborhood_features()\n self._update(features)\n\n for generation in range(1, self.max_generations):\n\n self.generation_count = generation\n self._feature_group_thresh = generation\n\n features = self._get_next_features()\n self._update(features)\n\n # stop if an iteration results in no features retained\n if 
not self._final_features[generation]:\n break\n\n return self._finalize_features()", "def select_features(self, X, y):\n # remove features that are constant\n X = X.loc[:, (X != X.iloc[0]).any()]\n data = {\"X\": X, \"y\": y}\n with open(\"data.pkl\", \"wb\") as data_file:\n pickle.dump(data, data_file)\n\n call([\"python3\", \"select_features.py\"])\n with open(\"rel_features.pkl\", \"rb\") as rel_features:\n relevant_features = pickle.load(rel_features)\n\n return list(relevant_features)", "def getFeature(df, start, end):\n\n return [df[start:end].mean(),\n df[start:end].std(),\n df[start:end].skew(),\n df[start:end].kurt(),\n df[start:end].quantile(0.25),\n df[start:end].quantile(0.75),\n df[start:end].quantile(0.90),\n df[start:end].quantile(0.15),\n df[start:end].median(),\n df[start:end].mad(),\n df[start:end].sem(),\n df[start:end].var(),\n df[start:end].autocorr(1),\n df[start:end].autocorr(2),\n df[start:end].autocorr(3),\n df[start:end].autocorr(4),\n df[start:end].autocorr(5),\n np.append(df[start:end].mode(), -1)[0]\n ]", "def extract_features(self, data):\n\n # TODO: Should feature extraction be done on the testing data? In the lecture notes\n # TODO: it is not done with the training data, but with the test data.\n # TODO: Maybe we should use the validate data when we do cross-validation.\n\n features = np.zeros([len(data)*self.K]).reshape(len(data), self.K)\n for i in range(len(data)):\n for j in range(self.K):\n features[i][j] = np.linalg.norm(data[i] - self.cb_vectors[j])\n\n return features", "def _get_features(self, session):\n feature_utils.qsr_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n feature_utils.standardize_simple(session, self.config)\n\n # feature_utils.marker_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n\n return session[SESSION_FEAT]", "def getFeatures(self,layer): \n numFeatures = layer.GetFeatureCount()\n features = []\n for i in range(numFeatures):\n feature = layer.GetNextFeature()\n if feature is not None:\n geomRef = feature.GetGeometryRef()\n if((geomRef is not None and geomRef.GetPointCount() != 0)):\n features.append(self.getFeatureInfo(feature))\n return features", "def features(self):\n other_features = ['listen_type', 'is_context', 'is_context_flow', \n 'is_listened_context', 'is_listened_flow', \n 'is_listened_context_flow']\n \n drop_features = self.categorize_features + self.drop_features + other_features + self.features_bis\n features = np.setdiff1d(self.train.columns.tolist(), drop_features + ['is_listened'], assume_unique=True)\n \n return features", "def _load_data(self, ti, tf):\n # return pre loaded\n try:\n if ti == self.ti_prev and tf == self.tf_prev:\n return self.fM, self.ys\n except AttributeError:\n pass\n\n # read from CSV file\n try:\n t = pd.to_datetime(pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], usecols=['time'], infer_datetime_format=True).index.values)\n if (t[0] <= ti) and (t[-1] >= tf):\n self.ti_prev = ti\n self.tf_prev = tf\n fM,ys = self._extract_features(ti,tf)\n self.fM = fM\n self.ys = ys\n return fM,ys\n except FileNotFoundError:\n pass\n\n # range checking\n if tf > self.data.tf:\n raise ValueError(\"Model end date '{:s}' beyond data range '{:s}'\".format(tf, self.data.tf))\n if ti < self.data.ti:\n raise ValueError(\"Model start date '{:s}' predates data range '{:s}'\".format(ti, self.data.ti))\n \n # divide training period into years\n ts = [datetime(*[yr, 1, 1, 0, 0, 0]) for yr in 
list(range(ti.year+1, tf.year+1))]\n if ti - self.dtw < self.data.ti:\n ti = self.data.ti + self.dtw\n ts.insert(0,ti)\n ts.append(tf)\n\n for t0,t1 in zip(ts[:-1], ts[1:]):\n print('feature extraction {:s} to {:s}'.format(t0.strftime('%Y-%m-%d'), t1.strftime('%Y-%m-%d')))\n fM,ys = self._extract_features(ti,t1)\n\n self.ti_prev = ti\n self.tf_prev = tf\n self.fM = fM\n self.ys = ys\n return fM, ys", "def _fetch_features(self, X: np.ndarray, model: CnnModel, output_path: str, subset) -> np.ndarray:\n\n file_helper.guarantee_path_preconditions(output_path)\n\n file_path = join(output_path, subset + '.npy')\n if self._are_features_already_extracted(output_path, subset):\n print('Features already present on: ', file_path)\n features = np.load(file_path)\n else:\n print('Features not present yet, predicting now..')\n features = model.predict(X)\n return features", "def get_weather_data():\n get_pronto_data()\n zp = zipfile.ZipFile('open_data_year_one.zip')\n file_handle = zp.open('2015_weather_data.csv')\n return pd.read_csv(file_handle)", "def extract_features(self, *args, **kwargs):\n return self(*args, **kwargs)", "def extract_fea_for_full_model(batch,index):\n var = extract_delta_Q_variance(batch,index,start_cycle=10,end_cycle=100)\n min,_ = extract_delta_Q_min_mean(batch,index,start_cycle=10,end_cycle=100)\n slope_2,intercept_2 = extract_slope_intercept_cycle_to_cycle(batch,index,2,100)\n qd_2 = extract_cycle_QDischarge(batch,index,cycle=2)\n avg_time = extract_avg_charge_time_5(batch,index)\n integtal_t = extract_temp_integral_2_to_100(batch,index)\n min_ir = extract_min_ir_2_to_100(batch,index)\n diff_ir = extract_diff_ir_2_100(batch,index)\n\n X = np.hstack((var,min,slope_2,intercept_2,qd_2,avg_time,integtal_t,min_ir,diff_ir))\n return X\n pass", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def derive_variables(self, now, weather_forecast={}):\n # project timestamps into vector space\n if self.use_timestamp:\n time_features = make_time_features(\n now, epoch=self.epoch, epoch_span=self.epoch_span)\n weather_forecast.update(time_features)\n X = pd.DataFrame(weather_forecast, index=[0])\n\n # Only ever see one record a time: pop values from 2D array\n y = self.model.predict(X[self.independent_variables])[0]\n result = {k: v for k, v in zip(self.dependent_variables, y)}\n return result", "def supported_features(self) -> ClimateEntityFeature:\n features = (\n ClimateEntityFeature.TARGET_TEMPERATURE\n | ClimateEntityFeature.FAN_MODE\n | ClimateEntityFeature.PRESET_MODE\n )\n\n if self._client.mode == self._client.MODE_AUTO:\n features |= ClimateEntityFeature.TARGET_TEMPERATURE_RANGE\n\n if self._client.hum_setpoint is not None:\n features |= ClimateEntityFeature.TARGET_HUMIDITY\n\n return features", "def get_features(self):\n x,y = self.agent\n return np.array([x,y])", "def add_all_features(df):\n df.reset_index(drop=True, inplace=True)\n df = target_indicators(df)\n df = momentum_indicators(df)\n df = trend_indicators(df)\n df = volatility_indicators(df)\n df = volume_indicators(df)\n df = special_indicators(df)\n return df", "def get_data(self):\n return self.X_train, self.X_test, self.y_train, self.y_test", "def features(self) -> List[Feature]:\n return self._features", "def learning_data(self, city, field):\r\n df = self.dframe(city)\r\n X = self.time_indices(df)[:, 1:]\r\n y = self.field_numpy(city, field)\r\n return X, y", "def feature_engineer_dataset(data, look_back = 1):\n dataX = []\n dataY = []\n est_range = len(data)-look_back-1\n for i in 
range(est_range):\n a = data[i:(i+look_back), 0]\n dataX.append(a)\n dataY.append(data[i + look_back, 0])\n\n return numpy.array(dataX), numpy.array(dataY)", "def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass", "def get_raw_feature_name(self):\n # open a h5 file in case we need it\n f5 = h5py.File(self.train_database[0], 'r')\n mol_name = list(f5.keys())[0]\n raw_data = f5.get(mol_name + '/features/')\n\n # if we select all the features\n if self.select_feature == \"all\":\n self.select_feature = {}\n self.select_feature['AtomicDensities'] = config.atom_vdw_radius_noH\n self.select_feature['Features'] = [\n name for name in raw_data.keys()]\n\n # if a selection was made\n else:\n # we loop over the input dict\n for feat_type, feat_names in self.select_feature.items():\n\n # if for a given type we need all the feature\n if feat_names == 'all':\n if feat_type == 'AtomicDensities':\n self.select_feature['AtomicDensities'] = \\\n config.atom_vdw_radius_noH\n elif feat_type == 'Features':\n self.select_feature[feat_type] = list(\n raw_data.keys())\n else:\n raise KeyError(\n f'Wrong feature type {feat_type}. '\n f'It should be \"AtomicDensities\" or \"Features\".')\n\n else:\n if feat_type == 'AtomicDensities':\n assert isinstance(\n self.select_feature['AtomicDensities'], dict)\n elif feat_type == 'Features':\n self.select_feature[feat_type] = []\n for name in feat_names:\n if '*' in name:\n match = name.split('*')[0]\n possible_names = list(raw_data.keys())\n match_names = [\n n for n in possible_names\n if n.startswith(match)]\n self.select_feature[feat_type] += match_names\n else:\n self.select_feature[feat_type] += [name]\n else:\n raise KeyError(\n f'Wrong feature type {feat_type}. '\n f'It should be \"AtomicDensities\" or \"Features\".')\n\n f5.close()", "def predict_energy_consumption(buildings):\n forecasts = [forecast_for_building(building) for i, building in buildings.iterrows()]\n df = pd.concat(forecasts)\n df.drop(columns=\"id\", inplace=True)\n df = buildings.merge(df, left_on=\"id\", right_on=\"building_id\")\n df[\"meter\"] = 0\n df[\"floor_count\"] = df[\"floorcount\"]\n df[\"air_temperature\"] = df[\"temp\"]\n df[\"relative_humidity\"] = df[\"humidity\"]\n df[\"dew_temperature\"] = df[\"air_temperature\"] - ((100 - df[\"relative_humidity\"]) / 5)\n df[\"precip_depth_1_hr\"] = np.nan\n df[\"timestamp\"] = pd.to_datetime(df[\"date\"])\n df[\"wind_direction\"] = df[\"deg\"]\n df[\"wind_speed\"] = df[\"speed\"]\n\n df.drop(columns=[\"id\", \"name\", \"floorcount\", \"latitude\", \"longitude\", \"user_id\", \"temp\", \"feels_like\", \"temp_min\",\n \"temp_max\", \"pressure\", \"sea_level\", \"grnd_level\", \"humidity\", \"temp_kf\", \"main\", \"description\",\n \"icon\", \"speed\", \"deg\", \"date\"], inplace=True)\n\n df_temp = df.copy(deep=True)\n for i in range(1, 4):\n df_temp[\"meter\"] += 1\n df = pd.concat([df, df_temp])\n del df_temp\n\n cfg = {\n 'circular_timestamp_encoding': False,\n 'log_transform_square_feet': True,\n 'log_transform_area_per_floor': True,\n 'label_square_feet_outlier': True,\n 'label_area_per_floor_outlier': True,\n 'encode_wind_direction': False,\n 'include_feels_like': True,\n 'fill_na_with_zero': False,\n 'add_lag_features': True,\n 'lag_columns': ['air_temperature', 'dew_temperature', 'cloud_coverage'],\n 'lag_windows': [6, 24],\n }\n [df] = build_features(df, cfg=cfg)\n\n df.reset_index(inplace=True, drop=True)\n building_ids = df[\"building_id\"]\n timestamps = df[\"timestamp\"]\n df.drop(columns=[\"timestamp\", 
\"month\", \"wind_direction\", \"wind_speed\", \"building_id\"], inplace=True)\n\n model_endpoint = \"http://model:5001/predict\"\n data = df.to_json()\n response = requests.get(model_endpoint, json=data).json()\n\n predictions = pd.DataFrame({\"reading\": response[\"prediction\"],\n \"building_id\": building_ids,\n \"meter\": df[\"meter\"],\n \"timestamp\": timestamps,\n \"air_temperature\": df[\"air_temperature\"]})\n return predictions", "def getDailyWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tdaily_weather = []\n\t\tweather = {}\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\t# Getting 4-day forecast, storing each day's data in a dictionary and\n\t\t# storing each dictionary in an array\n\t\tif fio.has_daily() is True:\n\t\t\tdaily = FIODaily.FIODaily(fio)\n\t\t\tfor day in xrange(0, 4):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\t\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tdaily_weather.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No Daily data'\n\t\treturn daily_weather", "def get_numerical_features(self, x: pd.DataFrame) -> pd.DataFrame:\n return x[self.numerical_features]", "def get_features(fpath, data_release, field_in='%', model_in='%', aggregate_classes=False, helpers=None):\n hdffile = h5py.File(fpath, 'r')\n features = np.array(hdffile[data_release])\n hdffile.close()\n # features = features[np.random.randint(features.shape[0], size=100000)]\n\n if aggregate_classes:\n agg_map = helpers.aggregate_sntypes(reverse=True)\n\n indexes = []\n for i, objid in enumerate(features['objid']):\n field, model, base, snid = objid.astype(str).split('_')\n\n if aggregate_classes is True:\n submodels = agg_map[int(model_in)]\n for m in submodels:\n if (field == field_in or field_in == '%') and (model_in == '%' or int(model) == int(m)):\n indexes.append(i)\n else:\n if (field == field_in or field_in == '%') and (model_in == '%' or int(model) == int(model_in)):\n indexes.append(i)\n\n features = features[indexes]\n\n return features", "def getXy_by_features(year, features, sex, age = None):\r\n print 'getXy_by_features(year=%d,features=%s,sex=%s,age=%s)' % (year, features, sex, age)\r\n \r\n X,y,keys = getXy_by_features_(year, features)\r\n X,y,keys = getXy_by_sex_age(X,y,keys, sex, age)\r\n X,y = normalize(X, y)\r\n\r\n return X,y,keys", "def get_weather(self):\n with urllib.request.urlopen(self.url) as response:\n json_data = response.read().decode('utf-8')\n\n data = json.loads(json_data)\n\n weather = {}\n weather['current'] = {\n 'temp': round(data['current']['temp_f']),\n 'humidity': round(data['current']['humidity']),\n 'summary': data['current']['condition']['text']\n }\n today = data['forecast']['forecastday'][0]['day']\n weather['today'] = {\n 'temp': 
round(today['maxtemp_f']),\n 'summary': today['condition']['text']\n }\n \n return weather", "def features(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:\n return self._features", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def _get_features_geo(self, id):\n #creates featues/geo tensors for all atoms in protein\n if self.type_feature == \"hot_simple\":\n features = self.hot_enc(id)\n elif self.type_feature == \"mass_charges\":\n features = self.mass_charges(id)\n elif self.type_feature == \"bio_properties\":\n features = self.bio_prop(id)\n elif self.type_feature == \"bio_all_properties\":\n features_1 = self.mass_charges(id)\n features_2 = self.bio_prop(id)\n features = np.concatenate((features_1, features_2), axis=1)\n geometry = self._get_geometry_protein(id)\n return features, geometry", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def getFeatures(gdf):\r\n import json\r\n features = [json.loads(gdf.to_json())['features'][0]['geometry']]\r\n return features", "def get_weather_and_nodes(model, filename, zmin=None):\n # TODO: Need to check how this fcn will fit into the new framework\n xs, ys, proj, t, q, z, lnsp = model.load(filename)\n return (reader.read_model_level(module, xs, ys, proj, t, q, z, lnsp, zmin),\n xs, ys, proj)", "def get_features(lexicon, feature_extractors, info, arg1=None, arg2=None, expand=False):\n feats = []\n for f in feature_extractors:\n res = f(info, arg1, arg2)\n if res is not None:\n for feat_el in generate_feature_element(res):\n _load_features(lexicon, f.__name__ + \"#\" + feat_el, feats, expand=expand)\n return feats", "def load_energy_weather_data(load_raw=None, fnames=TRAIN_FILE_NAMES):\n if load_raw is None:\n load_raw = not os.path.exists(os.path.join(\"..\", \"data\", \"ProcessedData\", \"EnergyWeather_orig_train.pkl\"))\n\n load_time_series.load_all_time_series(datasets=[DATASET], load_raw=load_raw, verbose=VERBOSE)\n ts_list = []\n for name in fnames:\n ts_list.extend(\n load_time_series.load_all_time_series(datasets=[DATASET], load_raw=False, name_pattern=name,\n verbose=False)\n )\n print(name)\n print(ts_list[-1].summarize_ts())\n\n return ts_list", "def predict_bike_demand(weather_data):\n # TODO: connect to the real deal!\n return {s[\"extra\"][\"uid\"]: random.randint(0, 11) for s in MOCK_STATION_STATS}", "def features(self):\n\n return self._features", "def _getFeatures(self, image):\n\n self.model.eval()\n lin_block = 0\n blockwise_features = [image]\n feature = image\n\n for m in self.model.modules():\n # Assume modules are arranged in \"chronological\" fashion\n\n if isinstance(m, nn.ReLU):\n # Get pre-ReLU activations for conv layers\n if len(feature.size()) == 4:\n blockwise_features.append(feature)\n\n if linearity_test(m) is not None:\n if isinstance(m, nn.Linear):\n feature = feature.view(feature.size(0),-1)\n feature = m(feature)\n\n return feature, blockwise_features", "def set_to_features(X_set):\n ext = Extractor()\n features = []\n for i in range(len(X_set)):\n print(i, \" out of \", len(X_set))\n bag_of_features = [ext.extract(X_set[i][j]) for j in range(len(X_set[i]))]\n\n features.append(bag_of_features)\n\n return features", "def getFeatures(self, N=None, indexes=False):\n if indexes:\n features = 
self.softmax2feats(self.feats, sort_values=True)\n else:\n features = self.feats\n if N:\n return features[:N]\n return features", "def load_data():\n d = load_wine()\n data = {colname: d.data[:, i] for i, colname in enumerate(d.feature_names)}\n data[\"target\"] = d.target\n return pd.DataFrame(data)" ]
[ "0.65693516", "0.6504419", "0.63095343", "0.6184907", "0.6181109", "0.61156017", "0.6097132", "0.60952747", "0.60912675", "0.6084227", "0.6035357", "0.59760165", "0.5962229", "0.5936362", "0.5921777", "0.59124935", "0.59112424", "0.58922076", "0.58688956", "0.58619624", "0.58587444", "0.5850264", "0.5834209", "0.5826236", "0.5792523", "0.5777546", "0.5776913", "0.5746392", "0.5723293", "0.5713805", "0.57037294", "0.5700881", "0.57007366", "0.569196", "0.5684859", "0.568216", "0.5680904", "0.56664973", "0.5666396", "0.56618077", "0.56588775", "0.56491935", "0.5645389", "0.5642727", "0.5638805", "0.56325877", "0.56318897", "0.56290126", "0.56233275", "0.5594289", "0.5593765", "0.55865544", "0.55836016", "0.5578603", "0.5576169", "0.55743", "0.55739516", "0.5556689", "0.5554021", "0.5553244", "0.5538897", "0.5538222", "0.5537444", "0.5512315", "0.5505194", "0.5489748", "0.5489582", "0.5475917", "0.5475308", "0.5472514", "0.5470829", "0.5465581", "0.5462728", "0.5459244", "0.54561424", "0.54482764", "0.5445688", "0.5443401", "0.54355145", "0.5433617", "0.54308224", "0.54274726", "0.5421308", "0.54157776", "0.5411144", "0.5399299", "0.5395402", "0.53938925", "0.53911775", "0.5391121", "0.5388394", "0.53828466", "0.5381702", "0.5380867", "0.5378172", "0.5375909", "0.5369885", "0.535468", "0.5353799", "0.53535014" ]
0.7378489
0
Get features (for regression) based on the weather data
def make_weather_features(self, timeline_dt_list): print "Making weather features..." N_FEATURES = 2 n_examples = len(timeline_dt_list) XX = numpy.zeros((n_examples, N_FEATURES)) indices = numpy.zeros(n_examples,dtype='int') ind_weatherday = 0 # Loop over all times in the timeline for ii, time in enumerate(timeline_dt_list): # Find where this time in the timeline matches the date # of some weather data. jj = ind_weatherday while time.date() != self.datetimes[jj].date(): # Make sure jj does not get too large to be an index to # the list. # Note this is probably a bad idea to do it this way. if jj == len(self.datetimes)-1: break jj += 1 ## print jj ind_weatherday = jj indices[ii] = ind_weatherday # XX[ii, 0] = self.table['PrecipIn'][ind_weatherday] # XX[ii, 1] = self.table['Mean TemperatureF'][ind_weatherday] ## XX[ii, 2] = self.table['MeanDew PointF'][ind_weatherday] XX[:,0] = self.table['PrecipIn'][indices] XX[:,1] = self.table['Mean TemperatureF'][indices] self.weather_features = XX return XX
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weather_features(self):\n if self.weather_features is None:\n raise Exception(\"Weather features not made yet.\")\n### self.make_weather_features()\n else:\n return self.weather_features", "def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"] * 60 + self.df_poly[\"minute\"]\n\n # Empty list to hold the feature names\n poly_feature_names = []\n\n # Add the poly columns to the df_poly\n for degree in [0, 1, 2, 3, 4, 5]:\n self.df_poly = poly(self.df_poly, degree)\n poly_feature_names.append(\"poly_\" + str(degree))\n\n # filterout + - inf, nan\n self.df_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ]\n\n # Save the poly feature name\n self.poly_feature_names = poly_feature_names\n feature_names = []\n\n #########################################################################################\n train_index_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ].index\n X_train_poly, y_train_poly = (\n self.df_poly[self.poly_feature_names].loc[train_index_poly],\n self.df_poly[\"y\"].loc[train_index_poly],\n )\n\n # Build the Polynomial Regression Model\n lin_reg = LinearRegression()\n lin_reg.fit(X_train_poly, y_train_poly)\n self.poly_model = lin_reg\n y_train_season = lin_reg.predict(X_train_poly)\n self.y_train_season_obj = y_train_season\n #########################################################################################\n\n for n in [10, 15, 20, 25, 30]:\n self.df = MOM(self.df, n)\n feature_names.append(\"MOM_\" + str(n))\n for n in [10, 15, 20, 25, 30]:\n self.df = ROC(self.df, n)\n feature_names.append(\"ROC_\" + str(n))\n for n in [1, 2, 3, 4, 5]:\n self.df = LAG(self.df, n)\n feature_names.append(\"LAG_\" + str(n))\n for n in [10, 20, 30]:\n self.df = MA(self.df, n)\n feature_names.append(\"MA_\" + str(n))\n\n self.df = self.df[\n ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)\n ] # filterout + - inf, nan\n self.feature_names = feature_names", "def get_all_station_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n num = demand_data[:, 0, -2, np.newaxis] # todo check meaning here, get quick and slow feature\n\n raw_data = np.concatenate((num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=GENERAL_HEADER)\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n 
os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def forecast_weather(self):\n pass", "def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x", "def load_data():\n data = pd.read_csv('datasets/housing.csv')\n prices = data['MEDV']\n features = data.drop(['MEDV'], axis=1) # remove it from data as we need to predict it\n print(data.head()) # prints top columns 5 for ex\n return [features, prices]", "def extractFeatures(self, datum):\n abstract", "def extract_features(time_series, window):\n if not tsd_common.is_standard_time_series(time_series, window):\n # add your report of this error here...\n\n return []\n\n # spilt time_series\n split_time_series = tsd_common.split_time_series(time_series, window)\n # nomalize time_series\n normalized_split_time_series = tsd_common.normalize_time_series(split_time_series)\n max_min_normalized_time_series = tsd_common.normalize_time_series_by_max_min(split_time_series)\n s_features = statistical_features.get_statistical_features(normalized_split_time_series[4])\n f_features = fitting_features.get_fitting_features(normalized_split_time_series)\n c_features = classification_features.get_classification_features(max_min_normalized_time_series)\n # combine features with types\n features = s_features + f_features + c_features\n return features", "def extract_features(self, inputs):\n pass", "def get_all_features(self) :\n raise NotImplementedError", "def generate_features(self):\n bars = self.portfolio.data_handler.bars.ix[:, -15:, :]\n prices = bars[\"adj_price_close\"]\n weights = np.array([1.0, -1.])\n feats = pd.DataFrame(index=bars.minor_axis)\n ts = prices.dot(weights)\n feats[\"z-score\"] = (ts.ix[-1] - ts.mean()) / ts.std()\n return feats", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n 
energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def select(self, features):\n if 'Weather Type' not in features:\n features.append('Weather Type')\n self.data = self.data[:,[self._getFIdx(f) for f in features]]\n self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]\n return 0", "def get_features(self):\n return []", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def weather_data(cities, openweathermap_api_key=openweathermap_api_key):\n L = []\n for c in cities:\n res = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={c}&appid={openweathermap_api_key}&units=imperial')\n L.append(res.json())\n\n df = pd.DataFrame(L)\n df['lon'] = df['coord'].map(op.itemgetter('lon'))\n df['lat'] = df['coord'].map(op.itemgetter('lat'))\n df['Temprature'] = df['main'].map(op.itemgetter('temp'))\n df['Humidity'] = df['main'].map(op.itemgetter('humidity'))\n df['Wind Speed'] = df['wind'].map(op.itemgetter('speed'))\n return df[['name','lon', 'lat','Temprature','Humidity','Wind Speed']]", "def get_dataset_features(text):\n return model.extract(text)", "def get_features(self, request, **kwargs):\n raise NotImplementedError()", "def get_model_feature(\n model,\n batch_x\n):\n features = model.get_feature(batch_x, training=False)\n return features", "def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n 
logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip", "def get_features(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n # Prepare the output\n self.response = self.model.features_df\n self.response[\"sort_order\"] = pd.Series([i+1 for i in range(len(self.response.index))], index=self.response.index)\n self.response = self.response[[\"model_name\", \"sort_order\", \"name\", \"variable_type\", \"data_type\",\\\n \"feature_strategy\", \"strategy_args\"]]\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"features\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def _extract_features(self, row):\n ncep_data = self.ncep_data\n ncep_sfc_data = self.ncep_sfc_data\n date = row['date']\n features = dict(row)\n #reduce the dimensions of ncep_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_data = ncep_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['level','time'])\n #reduce the dimensions of ncep_sfc_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_sfc_data = ncep_sfc_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['time'])\n\n for level in self.levels:\n #features at different pressure level\n point = ncep_data.loc[level]\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_data_vars:\n features[\"{0}_0w_lvl_{1}\".format(data_var,level)] = v0w[data_var]\n features[\"{0}_1w_lvl_{1}\".format(data_var,level)] = v1w[data_var]\n features[\"{0}_2w_lvl_{1}\".format(data_var,level)] = v2w[data_var]\n features[\"{0}_3w_lvl_{1}\".format(data_var,level)] = v3w[data_var]\n #features at surface level\n point = ncep_sfc_data\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_sfc_data_vars:\n features[\"{0}_0w\".format(data_var)] = v0w[data_var]\n features[\"{0}_1w\".format(data_var)] = v1w[data_var]\n features[\"{0}_2w\".format(data_var)] = v2w[data_var]\n features[\"{0}_3w\".format(data_var)] = v3w[data_var] \n\n return features", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n 
ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def feature_finder(model):\n \n features = model.steps[0][1].get_feature_names()\n feat_values = model[1].coef_\n\n c = {'features' : features}\n feats = pd.DataFrame(data = c)\n feats['values'] = feat_values[0]\n\n sorted_feats = feats.sort_values(by='values')\n return sorted_feats", "def get_features(data, col_list, y_name):\n \n # keep track of numpy values\n feature_matrix = data[col_list + [y_name]].dropna().values\n return feature_matrix[:, :-1], feature_matrix[:, -1]", "def _get_feature_map(self, time_period=False, volume_filter=False):\n if not self.stocks:\n return False\n\n # Load the data from the stock dictionary\n features = []\n symbol_names = []\n historical_price_info = []\n\n if not time_period:\n today = datetime.datetime.now()\n previous = today - datetime.timedelta(days=60)\n time_period = [previous, today]\n\n for stock in self.stocks:\n price_data = self.db.get_stock_prices(\n stock, time_period=time_period, dataframe=True)\n\n if type(price_data) == bool and not price_data:\n continue\n if len(price_data) < 5:\n continue\n\n volatility = self.stock_engine.volatility(\n price_data, dataframe=True)\n\n if volatility[0] < self.VOLATILITY_FILTER:\n continue\n\n stock_feature_dict = self.stock_engine.get_technical_indicators(\n price_data)\n\n if not stock_feature_dict:\n continue\n\n feature_list = []\n for key in list(sorted(stock_feature_dict.keys())):\n feature_list.extend(stock_feature_dict[key])\n\n if np.isnan(feature_list).any() == True:\n\n continue\n\n avg_volume = np.mean(list(price_data['volume'])[-30:])\n\n if volume_filter and avg_volume < volume_filter:\n continue\n\n features.append(feature_list)\n symbol_names.append(stock)\n historical_price_info.append(price_data)\n features, historical, symbols = self._preproc_data(\n features, historical_price_info, symbol_names)\n\n return features, historical, symbols", "def generate_features(df):\n return np.array([np.array(xi) for xi in pd.to_datetime(df).apply(lambda x: [x.year, x.month, x.day, x.hour, x.minute, x.second, x.weekday()])])", "def get_weather_and_nodes(model, filename, zmin=None):\n # TODO: Need to check how this fcn will fit into the new framework\n xs, ys, proj, t, q, z, lnsp = model.load(filename)\n return (reader.read_model_level(module, xs, ys, proj, t, q, z, lnsp, zmin),\n xs, ys, proj)", "def __predict_input_fn(self):\n ## Recast spectra into dictionary for estimator\n features = {'flux': self.spectra_test}\n return features", "def get_features(df, target=[], meta=[]):\n ############################################################\n # Type conversion\n ############################################################\n\n types = df[df.columns[~df.columns.isin(target+meta)]].dtypes\n for col_name, col_type in types.iteritems():\n if col_type == bool:\n df[col_name] = df[col_name].astype(float)\n\n ############################################################\n # Get features by type\n ############################################################\n \n features_cat = filter(lambda x: not np.issubdtype(x[1], np.number), types.iteritems())\n features_cat = sorted(list(map(lambda x: x[0], features_cat)))\n # target and meta should have already been removed. 
but just to be sure\n features_num = sorted(list(set(types.index) - set(features_cat) - set(target) - set(meta))) \n selected_features = df.columns.to_list()\n features_idx = dict(zip(selected_features, range(len(selected_features))))\n \n return selected_features, features_num, features_cat, features_idx", "def get_feature_importances(self):\n X,y = self.define_dataset(self.df, self.col_list, self.target_var)\n\n # execute search\n search = self.set_Randomized_search(self.model)\n\n X_train, X_test, y_train, y_test= self.holdout(X, y)\n X_train_sc, X_test_sc = self.scale(X_train, X_test)\n res = search.fit(X_train_sc, y_train)\n\n #model = self.set_model(self.model)\n\n\n if (self.model == \"Lasso\") | (self.model == \"Ridge\"):\n\n model = self.set_model(self.model)\n best = model.set_params(**res.best_params_)\n best.fit(X_train_sc,y_train)\n features = best.coef_\n\n else:\n #RandomForest or XGBoost\n model = self.set_model(self.model)\n best = model.set_params(**res.best_params_)\n best.fit(X_train_sc,y_train)\n features = pd.DataFrame(best.feature_importances_,\n index = X_train.columns,\n columns=['importance']).sort_values('importance', ascending=False)\n\n return features", "def feature_eng2(housing_tr, housing):\n logging.info(\"Adding features.....\")\n housing_tr[\"rooms_per_household\"] = (\n housing_tr[\"total_rooms\"] / housing_tr[\"households\"]\n )\n housing_tr[\"bedrooms_per_room\"] = (\n housing_tr[\"total_bedrooms\"] / housing_tr[\"total_rooms\"]\n )\n housing_tr[\"population_per_household\"] = (\n housing_tr[\"population\"] / housing_tr[\"households\"]\n )\n housing_cat = housing[[\"ocean_proximity\"]]\n housing_prepared = housing_tr.join(\n pd.get_dummies(housing_cat, drop_first=True)\n )\n return housing_prepared", "def load_energy_weather_data(load_raw=None, fnames=TRAIN_FILE_NAMES):\n if load_raw is None:\n load_raw = not os.path.exists(os.path.join(\"..\", \"data\", \"ProcessedData\", \"EnergyWeather_orig_train.pkl\"))\n\n load_time_series.load_all_time_series(datasets=[DATASET], load_raw=load_raw, verbose=VERBOSE)\n ts_list = []\n for name in fnames:\n ts_list.extend(\n load_time_series.load_all_time_series(datasets=[DATASET], load_raw=False, name_pattern=name,\n verbose=False)\n )\n print(name)\n print(ts_list[-1].summarize_ts())\n\n return ts_list", "def parse_weather(data: DataFrame) -> List[WeatherData]:\n parsed_results = []\n\n for index, row in data.iterrows():\n date = sqlite3.Date(index.year, index.month, index.day)\n item = WeatherData(\n date=date,\n average_temp=celsius_to_fahr(row.get('tavg', 0)),\n precipitation=row.get('prcp', 0),\n )\n parsed_results.append(item)\n return parsed_results", "def semi_all_static_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 
'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n total_num = demand_data[:, 0, -2, np.newaxis]\n slow_num = demand_data[:, 0, 0, np.newaxis]\n fast_num = demand_data[:, 0, 2, np.newaxis]\n\n raw_data = np.concatenate((slow_num, fast_num, total_num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=SEMI_GENERAL_HEADER)\n print(csv_data.shape)\n # print(csv_data.iloc[:, 2])\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'semi_static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def _extract_data(self) -> np.ndarray:\n \n mats = Material.objects.all()\n \n mat_arrays = []\n for mat in mats: # django queryset -> python list\n mat_features = []\n \n # Add data\n # Some data are missing here.\n #TODO: Delete those if sentences after cleaning the data.\n mat_features.append(mat.model_surface_temperature if mat.model_surface_temperature!=None else 0)\n mat_features.append(mat.melt_temperature if mat.melt_temperature!=None else 0)\n mat_features.append(mat.mold_temperature_range_min if mat.mold_temperature_range_min!=None else 0)\n mat_features.append(mat.mold_temperature_range_max if mat.mold_temperature_range_max!=None else 0)\n mat_features.append(mat.melt_temperature_range_min if mat.melt_temperature_range_min!=None else 0)\n mat_features.append(mat.melt_temperature_range_max if mat.melt_temperature_range_max!=None else 0)\n mat_features.append(mat.absolute_maximum_melt_temperature if mat.absolute_maximum_melt_temperature!=None else 0)\n mat_features.append(mat.ejection_temperature if mat.ejection_temperature!=None else 0)\n mat_features.append(mat.maximum_shear_stress if mat.maximum_shear_stress!=None else 0)\n mat_features.append(mat.maximum_shear_rate if mat.maximum_shear_rate!=None else 0)\n mat_features.append(mat.melt_density if mat.melt_density!=None else 0)\n mat_features.append(mat.solid_density if mat.solid_density!=None else 0)\n mat_features.append(mat.pvt_b5 if mat.pvt_b5!=None else 0)\n mat_features.append(mat.pvt_b6 if mat.pvt_b6!=None else 0)\n mat_features.append(mat.pvt_b1m if mat.pvt_b1m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b4m if mat.pvt_b4m!=None else 0)\n mat_features.append(mat.pvt_b1s if mat.pvt_b1s!=None else 0)\n mat_features.append(mat.pvt_b2s if mat.pvt_b2s!=None else 0)\n mat_features.append(mat.pvt_b3s if mat.pvt_b3s!=None else 0)\n mat_features.append(mat.pvt_b4s if mat.pvt_b4s!=None else 0)\n mat_features.append(mat.pvt_b7 if mat.pvt_b7!=None else 0)\n mat_features.append(mat.pvt_b8 if mat.pvt_b8!=None else 0)\n mat_features.append(mat.pvt_b9 if mat.pvt_b9!=None else 0)\n mat_features.append(mat.elastic_modulus_e1 if mat.elastic_modulus_e1!=None else 0)\n mat_features.append(mat.elastic_modulus_e2 if mat.elastic_modulus_e2!=None else 0)\n mat_features.append(mat.poisson_ratio_v12 if mat.poisson_ratio_v12!=None else 0)\n mat_features.append(mat.poisson_ratio_v23 if mat.poisson_ratio_v23!=None else 0)\n mat_features.append(mat.shear_modulus_g12 if mat.shear_modulus_g12!=None else 0.)\n mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1!=None else 0.)\n 
mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2!=None else 0.)\n mat_features.append(mat.seven_params_n if mat.seven_params_n!=None else 0.)\n mat_features.append(mat.seven_params_Tau if mat.seven_params_Tau!=None else 0.)\n mat_features.append(mat.seven_params_D1 if mat.seven_params_D1!=None else 0.)\n mat_features.append(mat.seven_params_D2 if mat.seven_params_D2!=None else 0.)\n mat_features.append(mat.seven_params_D3 if mat.seven_params_D3!=None else 0.)\n mat_features.append(mat.seven_params_A1 if mat.seven_params_A1!=None else 0.)\n mat_features.append(mat.seven_params_A2 if mat.seven_params_A2!=None else 0.)\n mat_features.append(mat.c1 if mat.c1!=None else 0.)\n mat_features.append(mat.c2 if mat.c2!=None else 0.)\n mat_features.append(mat.conversion_temperature if mat.conversion_temperature!=None else 0.)\n mat_features.append(mat.MFR_temperature if mat.MFR_temperature!=None else 0.)\n mat_features.append(mat.MFR_loading if mat.MFR_loading!=None else 0.)\n mat_features.append(mat.measured_MFR if mat.measured_MFR!=None else 0.)\n \n mat_arrays.append(mat_features)\n \n # Get numpy arrays.\n mat_arrays = np.array(mat_arrays, dtype=np.float64)\n \n return mat_arrays", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def supported_features(self) -> ClimateEntityFeature:\n features = (\n ClimateEntityFeature.TARGET_TEMPERATURE\n | ClimateEntityFeature.FAN_MODE\n | ClimateEntityFeature.PRESET_MODE\n )\n\n if self._client.mode == self._client.MODE_AUTO:\n features |= ClimateEntityFeature.TARGET_TEMPERATURE_RANGE\n\n if self._client.hum_setpoint is not None:\n features |= ClimateEntityFeature.TARGET_HUMIDITY\n\n return features", "def get_features(self, problem_name=None, user_name=None):\n with self.__orm.session_scope() as session:\n results = self._get_features(session, problem_name, user_name).all()\n feature_dicts = []\n for feature, user_name in results:\n d = {\n \"user\" : user_name,\n \"description\" : feature.description,\n \"md5\" : feature.md5,\n \"created_at\" : feature.created_at,\n }\n feature_metrics = session.query(Metric.name,\n Metric.value).filter(Metric.feature_id ==\n feature.id).all()\n # feature_metrics = feature.metrics\n for metric in feature_metrics:\n d[metric.name] = metric.value\n\n feature_dicts.append(d)\n\n if not feature_dicts:\n print(\"No features found\")\n else:\n return pd.DataFrame(feature_dicts)", "def get_features(lexicon, feature_extractors, info, arg1=None, arg2=None, expand=False):\n feats = []\n for f in feature_extractors:\n res = f(info, arg1, arg2)\n if res is not None:\n for feat_el in generate_feature_element(res):\n _load_features(lexicon, f.__name__ + \"#\" + feat_el, feats, expand=expand)\n return feats", "def get_features(self):\n return self._features", "def get_features(self, ti=None, tf=None, n_jobs=1, drop_features=[], compute_only_features=[]):\n # initialise training interval\n self.drop_features = drop_features\n 
self.compute_only_features = compute_only_features\n self.n_jobs = n_jobs\n ti = self.ti_model if ti is None else datetimeify(ti)\n tf = self.tf_model if tf is None else datetimeify(tf)\n return self._load_data(ti, tf)", "def _get_features(self, session):\n feature_utils.qsr_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n feature_utils.standardize_simple(session, self.config)\n\n # feature_utils.marker_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n\n return session[SESSION_FEAT]", "def findFeatures(self):\n\t\tpass", "def get_weather_data():\n get_pronto_data()\n zp = zipfile.ZipFile('open_data_year_one.zip')\n file_handle = zp.open('2015_weather_data.csv')\n return pd.read_csv(file_handle)", "def getFeature(df, start, end):\n\n return [df[start:end].mean(),\n df[start:end].std(),\n df[start:end].skew(),\n df[start:end].kurt(),\n df[start:end].quantile(0.25),\n df[start:end].quantile(0.75),\n df[start:end].quantile(0.90),\n df[start:end].quantile(0.15),\n df[start:end].median(),\n df[start:end].mad(),\n df[start:end].sem(),\n df[start:end].var(),\n df[start:end].autocorr(1),\n df[start:end].autocorr(2),\n df[start:end].autocorr(3),\n df[start:end].autocorr(4),\n df[start:end].autocorr(5),\n np.append(df[start:end].mode(), -1)[0]\n ]", "def get_weather(latitude, longitude, units):\n global API_KEY\n qs = {\n 'lat': latitude,\n 'lon': longitude,\n 'APPID': API_KEY,\n 'units': units\n }\n qs = urllib.parse.urlencode(qs)\n url = \"http://api.openweathermap.org/data/2.5/weather?{}\".format(qs)\n weather = requests.get(url)\n weather_json = json.loads(weather.content)\n category = weather_json['weather'][0]['id']\n temp = weather_json['main']['temp']\n wind_speed = weather_json['wind']['speed']\n\n return category, temp, wind_speed", "def generateFeatures(self, data):\n pass", "def _load_data(self, ti, tf):\n # return pre loaded\n try:\n if ti == self.ti_prev and tf == self.tf_prev:\n return self.fM, self.ys\n except AttributeError:\n pass\n\n # read from CSV file\n try:\n t = pd.to_datetime(pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], usecols=['time'], infer_datetime_format=True).index.values)\n if (t[0] <= ti) and (t[-1] >= tf):\n self.ti_prev = ti\n self.tf_prev = tf\n fM,ys = self._extract_features(ti,tf)\n self.fM = fM\n self.ys = ys\n return fM,ys\n except FileNotFoundError:\n pass\n\n # range checking\n if tf > self.data.tf:\n raise ValueError(\"Model end date '{:s}' beyond data range '{:s}'\".format(tf, self.data.tf))\n if ti < self.data.ti:\n raise ValueError(\"Model start date '{:s}' predates data range '{:s}'\".format(ti, self.data.ti))\n \n # divide training period into years\n ts = [datetime(*[yr, 1, 1, 0, 0, 0]) for yr in list(range(ti.year+1, tf.year+1))]\n if ti - self.dtw < self.data.ti:\n ti = self.data.ti + self.dtw\n ts.insert(0,ti)\n ts.append(tf)\n\n for t0,t1 in zip(ts[:-1], ts[1:]):\n print('feature extraction {:s} to {:s}'.format(t0.strftime('%Y-%m-%d'), t1.strftime('%Y-%m-%d')))\n fM,ys = self._extract_features(ti,t1)\n\n self.ti_prev = ti\n self.tf_prev = tf\n self.fM = fM\n self.ys = ys\n return fM, ys", "def select_features(self):\r\n \r\n features_list = list(self.feed_data.columns.values)\r\n features_list.remove(\"min_time\")\r\n thisrace = self.config.race_to_predict\r\n\r\n #if never ran race before, don't include these variables in feature\r\n #selection, they're just 0's anyway\r\n if self.config.first_time_running_race == True:\r\n 
unuseable_columns = [('min_time', thisrace),('std', thisrace),('num_races', thisrace),\r\n ('rainfall', thisrace),\r\n ('temp', thisrace),\r\n ('wind', thisrace),\r\n ('metersup', thisrace), \r\n 'sex_W']\r\n else:\r\n #drop this column...probs should have removed it earlier. \r\n unuseable_columns = ['sex_W']\r\n #print(features_list)\r\n for element in unuseable_columns:\r\n features_list.remove(element)\r\n data_with_all_feats = self.feed_data.drop(unuseable_columns,axis=1)\r\n colstodrop = features_list\r\n thiscols = []\r\n data_with_current_feats = data_with_all_feats.drop(features_list,axis=1)\r\n checkfit=100.0\r\n scores = []\r\n dropped_cols = []\r\n loopgain =True\r\n #mymod = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=10,\r\n # min_samples_split = 25, criterion='mse')\r\n thisloopfeatures_list = features_list\r\n curcols = data_with_current_feats.columns\r\n countgain=0\r\n #print(\"cc\",curcols)\r\n while loopgain == True:\r\n thisloopscore=100.0\r\n for fet in thisloopfeatures_list:\r\n data_with_current_feats[fet] = data_with_all_feats[fet]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=15,\r\n min_samples_split = 12, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n if ((thisloopscore - oobs) > 0.0):\r\n thisloopscore = oobs\r\n fetwinner = fet\r\n data_with_current_feats.drop(fet,axis=1,inplace=True)\r\n etrain.drop(fet,axis=1,inplace=True)\r\n\r\n data_with_current_feats[fetwinner] = data_with_all_feats[fetwinner]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n #print(fetwinner,predscore)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n #print(fetwinner,\"~\",oobs)\r\n thisloopfeatures_list.remove(fetwinner)\r\n if ((checkfit-oobs)>0.0001):\r\n checkfit = oobs\r\n curcols = data_with_current_feats.columns\r\n #print(curcols)\r\n else:\r\n break\r\n\r\n\r\n self.final_df = self.feed_data[data_with_current_feats.columns]\r\n self.Xtrain=self.final_df.sample(frac=0.8,random_state=200)\r\n self.Xtest=self.final_df.drop(self.Xtrain.index)#\r\n self.ytrain = self.Xtrain.pop('min_time')\r\n self.ytest = self.Xtest.pop('min_time')\r\n self.model= RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n self.model.fit(self.Xtrain,self.ytrain)\r\n #print(y)\r\n return", "def extract_fea_for_full_model(batch,index):\n var = extract_delta_Q_variance(batch,index,start_cycle=10,end_cycle=100)\n min,_ = extract_delta_Q_min_mean(batch,index,start_cycle=10,end_cycle=100)\n slope_2,intercept_2 = extract_slope_intercept_cycle_to_cycle(batch,index,2,100)\n qd_2 = 
extract_cycle_QDischarge(batch,index,cycle=2)\n avg_time = extract_avg_charge_time_5(batch,index)\n integtal_t = extract_temp_integral_2_to_100(batch,index)\n min_ir = extract_min_ir_2_to_100(batch,index)\n diff_ir = extract_diff_ir_2_100(batch,index)\n\n X = np.hstack((var,min,slope_2,intercept_2,qd_2,avg_time,integtal_t,min_ir,diff_ir))\n return X\n pass", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def get_station_features(cls, station_row):\n features = station_row[2].lower(), station_row[7], station_row[8]\n return features", "def predict_energy_consumption(buildings):\n forecasts = [forecast_for_building(building) for i, building in buildings.iterrows()]\n df = pd.concat(forecasts)\n df.drop(columns=\"id\", inplace=True)\n df = buildings.merge(df, left_on=\"id\", right_on=\"building_id\")\n df[\"meter\"] = 0\n df[\"floor_count\"] = df[\"floorcount\"]\n df[\"air_temperature\"] = df[\"temp\"]\n df[\"relative_humidity\"] = df[\"humidity\"]\n df[\"dew_temperature\"] = df[\"air_temperature\"] - ((100 - df[\"relative_humidity\"]) / 5)\n df[\"precip_depth_1_hr\"] = np.nan\n df[\"timestamp\"] = pd.to_datetime(df[\"date\"])\n df[\"wind_direction\"] = df[\"deg\"]\n df[\"wind_speed\"] = df[\"speed\"]\n\n df.drop(columns=[\"id\", \"name\", \"floorcount\", \"latitude\", \"longitude\", \"user_id\", \"temp\", \"feels_like\", \"temp_min\",\n \"temp_max\", \"pressure\", \"sea_level\", \"grnd_level\", \"humidity\", \"temp_kf\", \"main\", \"description\",\n \"icon\", \"speed\", \"deg\", \"date\"], inplace=True)\n\n df_temp = df.copy(deep=True)\n for i in range(1, 4):\n df_temp[\"meter\"] += 1\n df = pd.concat([df, df_temp])\n del df_temp\n\n cfg = {\n 'circular_timestamp_encoding': False,\n 'log_transform_square_feet': True,\n 'log_transform_area_per_floor': True,\n 'label_square_feet_outlier': True,\n 'label_area_per_floor_outlier': True,\n 'encode_wind_direction': False,\n 'include_feels_like': True,\n 'fill_na_with_zero': False,\n 'add_lag_features': True,\n 'lag_columns': ['air_temperature', 'dew_temperature', 'cloud_coverage'],\n 'lag_windows': [6, 24],\n }\n [df] = build_features(df, cfg=cfg)\n\n df.reset_index(inplace=True, drop=True)\n building_ids = df[\"building_id\"]\n timestamps = df[\"timestamp\"]\n df.drop(columns=[\"timestamp\", \"month\", \"wind_direction\", \"wind_speed\", \"building_id\"], inplace=True)\n\n model_endpoint = \"http://model:5001/predict\"\n data = df.to_json()\n response = requests.get(model_endpoint, json=data).json()\n\n predictions = pd.DataFrame({\"reading\": response[\"prediction\"],\n \"building_id\": building_ids,\n \"meter\": df[\"meter\"],\n \"timestamp\": timestamps,\n \"air_temperature\": df[\"air_temperature\"]})\n return predictions", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = 
CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n\n return featuresWeights", "def addFeature(dataset):\n\n dataset[\"H_L\"] = dataset[\"High\"] - dataset[\"Low\"]\n dataset['O_C'] = dataset['Adj Close'] - dataset['Open']\n dataset[\"K_L\"] = (dataset[\"Adj Close\"] - dataset[\"Open\"]) / dataset[\"H_L\"]\n dataset[\"OBV\"] = dataset[\"Volume\"] * \\\n (dataset[\"Adj Close\"] * 2 - dataset[\"H_L\"]) / dataset[\"H_L\"]\n\n dataset['3day MA'] = dataset['Adj Close'].shift(1).rolling(window=3).mean()\n dataset['10day MA'] = dataset['Adj Close'].shift(\n 1).rolling(window=10).mean()\n dataset['30day MA'] = dataset['Adj Close'].shift(\n 1).rolling(window=30).mean()\n dataset['Std_dev'] = dataset['Adj Close'].rolling(5).std()\n\n dataset['RSI'] = talib.RSI(dataset['Adj Close'].values, timeperiod=9)\n dataset['Williams %R'] = talib.WILLR(\n dataset['High'].values, dataset['Low'].values, dataset['Adj Close'].values, 7)\n dataset['Price_Rise'] = np.where(\n dataset['Adj Close'].shift(-1) > dataset['Adj Close'], 1, 0)\n\n dataset = dataset.dropna()\n\n return dataset", "def features(self) -> List[np.ndarray]:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n return [d.features for d in self.data]", "def featurize(data):\n features = {}\n missing_weight = False\n for fieldname in STATIC_FIELDS:\n # Static fields use -1 to denote that the value was not measured.\n if data[fieldname][0][1] == -1:\n features[fieldname] = NAN_REPLACE\n else:\n features[fieldname] = float(data[fieldname][0][1])\n for fieldname in FIELDS:\n # Time-series fields may or may not be measured, but if they are present\n # in the dataset, then the value will be valid (i.e. 
nonnegative).\n if fieldname in data:\n values = [float(d[1]) for d in data[fieldname]]\n if -1 in values and fieldname == 'Weight':\n # Record that weight was missing for this record id.\n missing_weight = True\n field_features = set_features_to_nan(fieldname)\n else:\n field_features = {}\n field_features['{}_min'.format(fieldname)] = min(values)\n field_features['{}_max'.format(fieldname)] = max(values)\n field_features['{}_mean'.format(fieldname)] = np.mean(values)\n field_features['{}_first'.format(fieldname)] = values[0]\n field_features['{}_last'.format(fieldname)] = values[-1]\n field_features['{}_diff'.format(fieldname)] = values[-1] - values[0]\n else:\n field_features = set_features_to_nan(fieldname)\n features.update(field_features)\n return features, missing_weight", "def get_weather(self):\n with urllib.request.urlopen(self.url) as response:\n json_data = response.read().decode('utf-8')\n\n data = json.loads(json_data)\n\n weather = {}\n weather['current'] = {\n 'temp': round(data['current']['temp_f']),\n 'humidity': round(data['current']['humidity']),\n 'summary': data['current']['condition']['text']\n }\n today = data['forecast']['forecastday'][0]['day']\n weather['today'] = {\n 'temp': round(today['maxtemp_f']),\n 'summary': today['condition']['text']\n }\n \n return weather", "def add_all_features(df):\n df.reset_index(drop=True, inplace=True)\n df = target_indicators(df)\n df = momentum_indicators(df)\n df = trend_indicators(df)\n df = volatility_indicators(df)\n df = volume_indicators(df)\n df = special_indicators(df)\n return df", "def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\n return featuresWeights", "def predict_all_features(input_data=\"not defined\"):\r\n X, y = splitting.get_x_and_y()\r\n output_dataframe = pd.DataFrame\r\n y_pred_dataframe = pd.DataFrame\r\n for actual_y in y:\r\n X_train, X_test, y_train, y_test = splitting.splitting_data(y=actual_y)\r\n y_pred, predicted_units = linear_regresstion_action(X_train, X_test, y_train, y_test, input_data)\r\n # not sure if scores[actual_y.name] works as well or even scores[actual_y]...\r\n # one need to test if input data is final\r\n output_dataframe[f\"{actual_y.name}\"] = predicted_units\r\n y_pred_dataframe[f\"{actual_y.name}\"] = y_pred\r\n return y_pred_dataframe, output_dataframe", "def getRegressionOutput(self):\n\n # Construct train data\n X_tmp = np.empty(shape=(0, 14))\n for flightNum in range(len(self.routes)):\n # concatenate the buy or wait info to get the total datas\n y_train = self.y_train.reshape((self.y_train.shape[0],1))\n y_train_price = self.y_train_price.reshape((self.y_train_price.shape[0],1))\n\n X_train = np.concatenate((self.X_train, y_train, y_train_price), axis=1)\n\n # choose one route datas\n X_train = X_train[np.where(X_train[:, flightNum]==1)[0], :]\n\n # remove dummy variables\n # feature 8: departure date; feature 9: observed date state\n # feature 10: minimum price; feature 11: maximum price\n # feature 12: prediction(buy or wait); feature 13: current price\n X_train = X_train[:, 0:14]\n\n # group by the feature: departure date\n departureDates_train = np.unique(X_train[:, 8])\n\n # get the final datas, the observed data state should be from large to small(i.e. 
for time series)\n for departureDate in departureDates_train:\n indexs = np.where(X_train[:, 8]==departureDate)[0]\n datas = X_train[indexs, :]\n minPrice = min(datas[:, 10])\n datas[:, 12] = minPrice\n \"\"\"\n print departureDate\n print minPrice\n print datas\n \"\"\"\n X_tmp = np.concatenate((X_tmp, datas), axis=0)\n\n X_train = X_tmp[:, 0:12]\n y_train = X_tmp[:, 12]\n y_train_price = X_tmp[:, 13]\n y_train = y_train.reshape((y_train.shape[0], 1))\n y_train_price = y_train_price.reshape((y_train_price.shape[0], 1))\n\n\n X_train = np.concatenate((X_train, y_train_price), axis=1)\n np.save('inputReg/X_train', X_train)\n np.save('inputReg/y_train', y_train)\n np.save('inputReg/y_train_price', y_train_price)\n\n\n # Construct test data\n X_tmp = np.empty(shape=(0, 14))\n for flightNum in range(len(self.routes)):\n # concatenate the buy or wait info to get the total datas\n y_test = self.y_test.reshape((self.y_test.shape[0],1))\n y_test_price = self.y_test_price.reshape((self.y_test_price.shape[0],1))\n\n X_test = np.concatenate((self.X_test, y_test, y_test_price), axis=1)\n\n # choose one route datas\n X_test = X_test[np.where(X_test[:, flightNum]==1)[0], :]\n\n # remove dummy variables\n # feature 8: departure date; feature 9: observed date state\n # feature 10: minimum price; feature 11: maximum price\n # feature 12: prediction(buy or wait); feature 13: current price\n X_test = X_test[:, 0:14]\n\n # group by the feature: departure date\n departureDates_test = np.unique(X_test[:, 8])\n\n # get the final datas, the observed data state should be from large to small(i.e. for time series)\n for departureDate in departureDates_test:\n indexs = np.where(X_test[:, 8]==departureDate)[0]\n datas = X_test[indexs, :]\n minPrice = min(datas[:, 10])\n datas[:, 12] = minPrice\n \"\"\"\n print departureDate\n print minPrice\n print datas\n \"\"\"\n X_tmp = np.concatenate((X_tmp, datas), axis=0)\n\n X_test = X_tmp[:, 0:12]\n y_test = X_tmp[:, 12]\n y_test_price = X_tmp[:, 13]\n y_test = y_test.reshape((y_test.shape[0], 1))\n y_test_price = y_test_price.reshape((y_test_price.shape[0], 1))\n X_test = np.concatenate((X_test, y_test_price), axis=1)\n np.save('inputReg/X_test', X_test)\n np.save('inputReg/y_test', y_test)\n np.save('inputReg/y_test_price', y_test_price)", "def get_features(self):\n x,y = self.agent\n return np.array([x,y])", "def select_features(self, X, y):\n # remove features that are constant\n X = X.loc[:, (X != X.iloc[0]).any()]\n data = {\"X\": X, \"y\": y}\n with open(\"data.pkl\", \"wb\") as data_file:\n pickle.dump(data, data_file)\n\n call([\"python3\", \"select_features.py\"])\n with open(\"rel_features.pkl\", \"rb\") as rel_features:\n relevant_features = pickle.load(rel_features)\n\n return list(relevant_features)", "def _fetch_features(self, X: np.ndarray, model: CnnModel, output_path: str, subset) -> np.ndarray:\n\n file_helper.guarantee_path_preconditions(output_path)\n\n file_path = join(output_path, subset + '.npy')\n if self._are_features_already_extracted(output_path, subset):\n print('Features already present on: ', file_path)\n features = np.load(file_path)\n else:\n print('Features not present yet, predicting now..')\n features = model.predict(X)\n return features", "def get_sun_features(image): # Use grayscale images, outside val: NaN\r\n ratio = sun_isoperimetric_ratio(image)\r\n sun_features = {\"sun_circularity_ratio\": ratio}\r\n return sun_features", "def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = 
self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])", "def temperatures():\n\n return station_9281", "def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass", "def derive_variables(self, now, weather_forecast={}):\n # project timestamps into vector space\n if self.use_timestamp:\n time_features = make_time_features(\n now, epoch=self.epoch, epoch_span=self.epoch_span)\n weather_forecast.update(time_features)\n X = pd.DataFrame(weather_forecast, index=[0])\n\n # Only ever see one record a time: pop values from 2D array\n y = self.model.predict(X[self.independent_variables])[0]\n result = {k: v for k, v in zip(self.dependent_variables, y)}\n return result", "def get_weather_data(lat, lon):\n\n # Get weather\n filedata = pvtoolslib.get_s3_filename_df()\n filedata_closest = nsrdbtools.find_closest_datafiles(float(lat), float(lon),\n filedata)\n\n filename = filedata_closest['filename'].iloc[0]\n\n if filename == '124250_37.93_-122.3.npz':\n weather, info = nsrdbtools.get_local_weather_data(filename)\n else:\n weather, info = pvtoolslib.get_s3_weather_data(filename)\n\n return weather, info", "def feature_extraction(images, save_to='dataset.csv'):\n num_images = len(images)\n logging.info(f\"Extracting features from {num_images} images...\")\n x = np.zeros((num_images, 7))\n y = np.zeros(num_images, dtype=np.int8)\n\n for i, image in enumerate(images):\n logging.info(f\"Processing Image {i+1}/{num_images}...\")\n y[i] = 0 if image.name.startswith('cyl') \\\n else 1 if image.name.startswith('inter') \\\n else 2 if image.name.startswith('let') \\\n else 3 if image.name.startswith('mod') \\\n else 4 if image.name.startswith('para') \\\n else 5 if image.name.startswith('super') \\\n else 6 if image.name.startswith('svar') else -1\n \n # Get number of object pixels in segmented color channels, which become features 0-3\n for color in [0,1,2,4]: # 3 is the color index for RGB so we skip that and use 4 (grayscale)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n if len(uniques) > 2:\n image = image.otsu(color)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n x[i,color if color is not 4 else 3] = counts[0]\n\n x[i,4] = np.std(image.getHistogram(4))\n\n x[i,5] = np.argmax(image.getHistogram(4))\n\n x[i,6] = np.argmin(image.getHistogram(4))\n\n # Save new dataset to file\n np.savetxt(save_to, np.concatenate([x,np.atleast_2d(y).T], axis=1), delimiter=',', fmt='%s')\n\n return x, y", "def getXy_by_features(year, features, sex, age = None):\r\n print 'getXy_by_features(year=%d,features=%s,sex=%s,age=%s)' % (year, features, sex, age)\r\n \r\n X,y,keys = getXy_by_features_(year, features)\r\n X,y,keys = getXy_by_sex_age(X,y,keys, sex, age)\r\n X,y = normalize(X, y)\r\n\r\n return X,y,keys", "def load_data():\n d = load_wine()\n data = {colname: d.data[:, i] for i, colname in enumerate(d.feature_names)}\n data[\"target\"] = d.target\n return pd.DataFrame(data)", "async def get_temperatures(self, **kwargs: Any) -> Dict[str, float]:\n ...", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n 
delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def extract_features(self, *args, **kwargs):\n return self(*args, **kwargs)", "def features(self) -> List[np.ndarray]:\n return None", "def populate_features(self):\n # AssetFeatureValue types\n satellite_feature_value = AssetFeatureValue.Standard.FUND_TYPE_SATELLITE.get_object()\n core_feature_value = AssetFeatureValue.Standard.FUND_TYPE_CORE.get_object()\n\n logger.info('Populating features for ticker %s' % self)\n r_feat = self.get_region_feature_value()\n ac_feat = self.get_asset_class_feature_value()\n curr_feat = self.get_currency_feature_value()\n at_feat = self.get_asset_type_feature_value()\n self.features.clear()\n self.features.add(r_feat, ac_feat, curr_feat, at_feat)\n if self.ethical:\n self.features.add(AssetFeatureValue.Standard.SRI_OTHER.get_object())\n self.features.add(core_feature_value if self.etf else satellite_feature_value)", "def getFeatures(self,layer): \n numFeatures = layer.GetFeatureCount()\n features = []\n for i in range(numFeatures):\n feature = layer.GetNextFeature()\n if feature is not None:\n geomRef = feature.GetGeometryRef()\n if((geomRef is not None and geomRef.GetPointCount() != 0)):\n features.append(self.getFeatureInfo(feature))\n return features", "def getFeatures(self, N=None, indexes=False):\n if indexes:\n features = self.softmax2feats(self.feats, sort_values=True)\n else:\n features = self.feats\n if N:\n return features[:N]\n return features", "def generate_features(self):\n\n # For each STFT timebin, divide data into three bins and get mean power\n data_array = np.array([])\n bl_array = np.array([])\n\n for trial in range(self.data_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.data_stft_norm.shape[-2]): # Each timebin\n for ch in range(self.data_stft_norm.shape[0]):\n data_array = np.append(data_array,[\n np.mean(self.data_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 9:27, tbin, trial])])\n\n data_array = np.reshape(data_array, (-1, 18))\n\n for trial in range(self.bl_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.bl_stft_norm.shape[-2]): # Each timebin\n for ch in range(self.bl_stft_norm.shape[0]):\n bl_array = np.append(bl_array, [\n np.mean(self.bl_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 9:27, tbin, trial])])\n bl_array = np.reshape(bl_array, (-1, 18))\n\n X = np.append(data_array, bl_array, axis=0)\n y = np.append(np.ones(data_array.shape[0]), np.zeros(bl_array.shape[0]))\n\n return X, y", "def extractRegressionData(combinedPairRDDWeekdaysRecord):\n commuteFields = combinedPairRDDWeekdaysRecord[1][0]\n weatherFields = combinedPairRDDWeekdaysRecord[1][1]\n \n # Assuming 'Snow on ground' field is index 20, if I counted right\n if str(weatherFields[20]) == '':\n weatherFields[20] = '0'\n \n desiredFieldList = commuteFields[1] + ',' + commuteFields[2] + ',' + weatherFields[20]\n return desiredFieldList", "def model():\n return TimeSeriesMultiReg()", "def _extract_features(self, times):\n times[1] = time()\n data = {n:self._extract_feature(f) for (n,f) in self.features.items()} \n times[2] = time()\n return (data, times, os.getpid())", "def 
get_prepared_data(cls, ext_stations=None):\n ext_stations = ext_stations or StationDAO.get_all_with_prices()\n features = (cls.get_station_features(row) for row in ext_stations)\n classes = (cls.get_category(row) for row in ext_stations)\n return features, classes", "def extract_features(self) -> DataFrameLike:\n # return already calculated features if stored in state\n if self._final_features:\n return self._finalize_features()\n\n # initialization: generation 0 features are neighborhood features\n features = self.graph.get_neighborhood_features()\n self._update(features)\n\n for generation in range(1, self.max_generations):\n\n self.generation_count = generation\n self._feature_group_thresh = generation\n\n features = self._get_next_features()\n self._update(features)\n\n # stop if an iteration results in no features retained\n if not self._final_features[generation]:\n break\n\n return self._finalize_features()", "def extract_features(net, ims):\n outs = net(ims)\n if isinstance(outs, list):\n outs = outs[1]\n features = outs.data\n return features", "def getDailyWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tdaily_weather = []\n\t\tweather = {}\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\t# Getting 4-day forecast, storing each day's data in a dictionary and\n\t\t# storing each dictionary in an array\n\t\tif fio.has_daily() is True:\n\t\t\tdaily = FIODaily.FIODaily(fio)\n\t\t\tfor day in xrange(0, 4):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\t\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tdaily_weather.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No Daily data'\n\t\treturn daily_weather", "def get_features_for_input(self, inputs):\n inputs = inputs.to(self.device)\n with torch.no_grad():\n features = self.model.encode(inputs)\n return features", "def get_weather(self, time=None, location=None):\n req = requests.get(self.source_url)\n text = req.text\n moment = self.extract_datetime(text)\n met_data = self.parse_hms_data(text)\n met_data['time'] = moment\n met_data['text'] = text\n return self.source_label, met_data", "def set_to_features(X_set):\n ext = Extractor()\n features = []\n for i in range(len(X_set)):\n print(i, \" out of \", len(X_set))\n bag_of_features = [ext.extract(X_set[i][j]) for j in range(len(X_set[i]))]\n\n features.append(bag_of_features)\n\n return features", "def features(self):\n return self._features", "def read_training(index_columns=None, both=False, weather=False):\n if weather:\n raw_X_train = pd.read_csv('data\\\\train_X.csv', parse_dates=['date'])\n raw_weather = pd.read_csv('data\\\\weather_data.csv', parse_dates=['date'])\n\n raw_X_train = ffill_nans(raw_X_train)\n raw_X_train = raw_X_train.merge(raw_weather, how='left', on=['date','hour'])\n 
raw_X_train = raw_X_train.set_index(index_columns)\n\n else:\n raw_X_train = pd.read_csv(\n 'data\\\\train_X.csv',\n parse_dates=['date'],\n index_col=index_columns)\n if both:\n raw_y_train = pd.read_csv(\n 'data\\\\train_y.csv',\n parse_dates=['date'],\n index_col=index_columns)\n\n return raw_X_train, raw_y_train\n \n return raw_X_train", "def GetWeatherByLocation():\n Location = GetLocation()\n WeatherUrl =\"http://api.openweathermap.org/data/2.5/weather?\"+ Location +\"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\"\n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n pprint(WeatherInfo)\n WindSpeed = WeatherInfo['wind']['speed']\n pprint(WindSpeed)\n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n print(type(Humidity))\n return(Temp, Humidity, Description)", "def get_weather(self):\n return self.__weather" ]
[ "0.7582345", "0.6779436", "0.6377024", "0.6304283", "0.6291205", "0.62426645", "0.62333375", "0.61894417", "0.6091039", "0.6048604", "0.6045246", "0.6044608", "0.60266703", "0.5981941", "0.59668094", "0.5964047", "0.5908457", "0.5897442", "0.5888104", "0.5868746", "0.5860874", "0.5860766", "0.5845543", "0.58411765", "0.58278084", "0.58242404", "0.5812056", "0.5809468", "0.5791883", "0.57901764", "0.5778642", "0.5765751", "0.573723", "0.5715603", "0.5707879", "0.5695782", "0.5695766", "0.5689964", "0.568835", "0.5687652", "0.56828564", "0.567381", "0.56633556", "0.5637668", "0.56368345", "0.56259596", "0.5608508", "0.56072676", "0.5599718", "0.559262", "0.5580342", "0.55772173", "0.5565341", "0.5549092", "0.5531572", "0.55288005", "0.55265814", "0.552291", "0.5521069", "0.55138576", "0.55072427", "0.5504756", "0.55003566", "0.5487195", "0.5485881", "0.5484878", "0.5482112", "0.5480666", "0.5480612", "0.5479063", "0.5477842", "0.5477537", "0.54745233", "0.54727757", "0.54643106", "0.54634297", "0.5459828", "0.5456137", "0.54559344", "0.54551405", "0.5453559", "0.5449618", "0.54493874", "0.54471534", "0.5445933", "0.54370743", "0.54337794", "0.5430945", "0.5408328", "0.5400209", "0.539989", "0.5394791", "0.53849787", "0.53828263", "0.53814703", "0.53802264", "0.5372191", "0.53718495", "0.5354991", "0.53491867" ]
0.6208372
7
Loads pretrained PyTorch model
def load_model(model_path: str) -> object:\n    model = torch.load(model_path)\n    model.eval()\n    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def load_model(model, path):\n\tmodel.load_state_dict(torch.load(path))\n\tprint(\"pre-trained model loaded from {}\".format(path))", "def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)", "def load_pretrained_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model_ft = models.alexnet(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading pretrained ResNet18 Model\")\n model_ft = models.resnet18(pretrained=True)\n\n for param in model_ft.parameters(): # Code for fixing the Conv Layer\n param.requires_grad = False # During Training Conv layer does not learn.\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet50\":\n print(\"Loading pretrained ResNet50 Model\")\n\n model_ft = models.resnet50(pretrained=True)\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"DenseNet\":\n print(\"Loading pretrained DenseNet161 Model\")\n model_ft = models.densenet161(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, 100)\n\n if cfg.load_model_true:\n model_ft.load_state_dict(torch.load(cfg.load_model_path))\n\n return model_ft", "def load_pretrained_model(self,model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n self.create_training_model(model_dir = model_dir,\n **rnn_params)\n #从目录中读取神经网络参数\n self.set_model_from_file()", "def load(self):\r\n # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))\r\n if torch.cuda.is_available():\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'))\r\n else:\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'), map_location=torch.device('cpu'))", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = 
{rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def load_model(model_name):\n if hasattr(torchvision.models, model_name):\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n try:\n import pretrainedmodels\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")", "def loadModel(self):\n self.model.load_state_dict(torch.load(os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)), map_location=torch.device(device)))\n return self.model", "def load(model, name=\"store/base\"):\n if torch.cuda.is_available():\n pretrained_dict = torch.load(name + \".pt\")\n else:\n pretrained_dict = torch.load(name + \".pt\", map_location=torch.device('cpu'))\n print(\"Loaded\", name + \" model.\")\n model_dict = model.state_dict()\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)", "def load_model(model_name):\n model = get_model(training = False)\n checkpoint = torch.load('../models/' + model_name)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_trainer(self):\n super().load_trainer()\n\n logging.info(\"[Server #%d] Loading a pre-trained model.\", 
os.getpid())\n self.trainer.load_model()", "def load_pretrained_model(self, load_from):\n print(\"loading model from %s\\n\" % (load_from))\n try:\n if self.use_cuda:\n pretrained_dict = torch.load(load_from)\n else:\n pretrained_dict = torch.load(load_from, map_location='cpu')\n\n model_dict = self.online_net.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.online_net.load_state_dict(model_dict)\n print(\"The loaded parameters are:\")\n keys = [key for key in pretrained_dict]\n print(\", \".join(keys))\n print(\"--------------------------\")\n except Exception as e:\n print(\"Failed to load checkpoint...\")\n print(e)", "def load_model(model, model_index, device=\"cpu\"):\n with open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n model.to(device)\n return model", "def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def load_custom_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model = models.alexnet()\n num_ftrs = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading ResNet18 Model\")\n model = models.resnet18() #Load the pytorch. torchvision model\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #Set it to match the ImageNet-100 Classes.\n elif model_name==\"ResNet50\":\n print(\"Loading ResNet50 Model\")\n model = models.resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #ImageNet-100 has 100 classes.\n elif model_name==\"DenseNet\":\n print(\"Loading DenseNet161 Model\")\n model = models.densenet161()\n num_ftrs = model.classifier.in_features\n model.classifier = nn.Linear(num_ftrs, 100)\n elif model_name==\"MyNet\":\n print(\"Loading Pyramid Model\")\n model = pyramid_net.create_model() # Load the model I implemented.\n\n if cfg.load_model_true: # Load the model that was stopped during training.\n model.load_state_dict(torch.load(cfg.load_model_path))\n\n return model", "def load(model_path: str):\n model = torch.load(model_path)\n model.eval()\n return model", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_pretrained_model(\n init_param: str,\n model: torch.nn.Module,\n map_location: str = \"cpu\",\n):\n sps = init_param.split(\":\", 4)\n if len(sps) == 4:\n path, src_key, dst_key, excludes = sps\n elif len(sps) == 3:\n path, src_key, dst_key = sps\n excludes = None\n elif len(sps) == 2:\n path, src_key = sps\n dst_key, excludes = None, None\n else:\n (path,) = sps\n src_key, dst_key, excludes = None, None, None\n if src_key == \"\":\n src_key = None\n if dst_key == \"\":\n dst_key = 
None\n\n if dst_key is None:\n obj = model\n else:\n\n def get_attr(obj: Any, key: str):\n \"\"\"Get an nested attribute.\n\n >>> class A(torch.nn.Module):\n ... def __init__(self):\n ... super().__init__()\n ... self.linear = torch.nn.Linear(10, 10)\n >>> a = A()\n >>> assert A.linear.weight is get_attr(A, 'linear.weight')\n\n \"\"\"\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj\n\n obj = get_attr(model, dst_key)\n\n src_state = torch.load(path, map_location=map_location)\n if excludes is not None:\n for e in excludes.split(\",\"):\n src_state = {k: v for k, v in src_state.items() if not k.startswith(e)}\n\n if src_key is not None:\n src_state = {\n k[len(src_key) + 1 :]: v\n for k, v in src_state.items()\n if k.startswith(src_key)\n }\n\n # tts.dec.feat_out,tts.dec.prob_out\n\n dst_state = obj.state_dict()\n\n for key in list(src_state.keys()):\n if src_state[key].shape != dst_state[key].shape:\n src_shape = src_state[key].shape\n dst_shape = dst_state[key].shape\n print(f'\"{key}\" shapes do not match:', src_shape, dst_shape)\n if src_shape[0] < dst_shape[0] and src_shape[1:] == dst_shape[1:]:\n print(f'doing partial override of \"{key}\"')\n dst_state[key][:src_shape[0]] = src_state[key]\n del src_state[key]\n\n dst_state.update(src_state)\n obj.load_state_dict(dst_state)", "def load_model(PATH):\n model = torch.load(PATH)\n model.eval()\n return model", "def load_model(model, device, model_path):\n if os.path.exists(model_path):\n print(\"Reading model from \", model_path)\n checkpoint = torch.load(model_path, map_location=torch.device(device))\n model.load_state_dict(checkpoint['state_dict'])\n return model\n else:\n raise RuntimeError('Model does not exist!')", "def load_model(saved_dir):\n output_args_file = os.path.join(saved_dir, 'training_args.bin')\n args = torch.load(output_args_file)\n print('Loaded args:', args)\n tokenizer_class = get_tokenizer_class(args.model_name)\n tokenizer = tokenizer_class.from_pretrained(saved_dir)\n model_class = get_model_class(args.model_name, args.task_name)\n model = model_class.from_pretrained(saved_dir)\n return model, tokenizer, args", "def load_model(model_path):\n nlp = spacy.blank('en') \n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner)\n #load pretrained model from the path\n ner = nlp.from_disk(model_path)\n return ner", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def load_model(filename):\n checkpoint = torch.load(filename)\n model = QNetwork(checkpoint['input_size'], checkpoint['output_size'], checkpoint['hidden_layers'])\n model.load_state_dict(checkpoint['state_dict'])\n return model", "def 
load_model(self, model_path: str):", "def import_model(path=None):\n path = get_model_path() if path is None else path\n return torch.jit.load(path)", "def load(self, model_name: str, model_dir: str = \"checkpoints\") -> None:\n self.model.load_state_dict(\n torch.load(os.path.join(model_dir, f\"{model_name}.pt\"))\n )", "def load_pretrained_model(model, pretrained_model_path, verbose=False):\n\n if isinstance(pretrained_model_path, str):\n if not os.path.exists(pretrained_model_path):\n raise IOError(\n \"Can't find pretrained model: {}\".format(pretrained_model_path)\n )\n\n print(\"Loading checkpoint from '{}'\".format(pretrained_model_path))\n pretrained_state = torch.load(pretrained_model_path)[\"state_dict\"]\n else:\n # incase pretrained model weights are given\n pretrained_state = pretrained_model_path\n\n print(len(pretrained_state), \" keys in pretrained model\")\n\n current_model_state = model.state_dict()\n print(len(current_model_state), \" keys in current model\")\n pretrained_state = {\n key: val\n for key, val in pretrained_state.items()\n if key in current_model_state and val.size() == current_model_state[key].size()\n }\n\n print(\n len(pretrained_state),\n \" keys in pretrained model are available in current model\",\n )\n current_model_state.update(pretrained_state)\n model.load_state_dict(current_model_state)\n\n if verbose:\n non_available_keys_in_pretrained = [\n key\n for key, val in pretrained_state.items()\n if key not in current_model_state\n or val.size() != current_model_state[key].size()\n ]\n non_available_keys_in_current = [\n key\n for key, val in current_model_state.items()\n if key not in pretrained_state or val.size() != pretrained_state[key].size()\n ]\n\n print(\n \"not available keys in pretrained model: \", non_available_keys_in_pretrained\n )\n print(\"not available keys in current model: \", non_available_keys_in_current)\n\n return model", "def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_model(path_model, model_type, device):\n if model_type == 'torch':\n model = torch.load(path_model).to(device)\n if hasattr(model, 'linblocks'):\n for linblock in model.linblocks:\n linblock.to(device)\n model.eval()\n return model\n elif model_type == 'sklearn':\n raise NotImplementedError\n else:\n raise Exception('Model type not known.')", "def load_pre_trained_model_state(saved_state_file_name):\n saved_state_file_name = saved_state_file_name if saved_state_file_name else \"my_checkpoint.pth\"\n loaded_state = torch.load(saved_state_file_name)\n\n pre_trained_model = models.vgg16(pretrained=True)\n pre_trained_model.name = \"vgg16\"\n\n for param in pre_trained_model.parameters():\n param.requires_grad = False\n\n pre_trained_model.class_to_idx = loaded_state['class_to_idx']\n pre_trained_model.classifier = 
loaded_state['classifier']\n pre_trained_model.load_state_dict(loaded_state['state_dict'])\n \n return pre_trained_model", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def load_model(self, filename):\r\n pass", "def load(path_to_model):\n pass", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def load_model():\n global model_tok, model_mlm, model, model_cls\n if model is None:\n model_name_or_path = os.getenv('TRANSFORMER_MODEL', default='distilbert-base-multilingual-cased')\n # 'bert-base-multilingual-cased'\n model_tok = AutoTokenizer.from_pretrained(model_name_or_path)\n model_mlm = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n model_mlm.eval()\n model = model_mlm.base_model\n\n if isinstance(model_mlm, BertPreTrainedModel):\n model_cls = model_mlm.cls\n elif isinstance(model_mlm, DistilBertPreTrainedModel):\n model_cls = nn.Sequential(\n model_mlm.vocab_transform,\n nn.GELU(),\n model_mlm.vocab_layer_norm,\n model_mlm.vocab_projector\n )\n else:\n raise ValueError(f'{model_name_or_path} is not supported yet. 
try one of '\n f'{\", \".join(list(AvailableModels.__members__.keys()))}')\n model.to(device)\n model_mlm.to(device)\n # model_tok.to(device)\n model_cls.to(device)", "def model_load(dict_dir, cuda_flag=True):\n model_dict = torch.load(dict_dir)\n model = meta_arch()\n print(dict_dir)\n model.load_state_dict(model_dict)\n\n if cuda_flag:\n model.cuda()\n\n return model", "def load_model(\n model_file_path: str = \"\",\n model_name: str = \"default\",\n cfg_path: str = None,\n) -> torch.nn.Module:\n cfg_path = cfg_path or Path(__file__).parent / \"config.yaml\"\n # assert model_name in model_file_path.split('_')[0], \"The checkpoint doesn't match with the selected model name\"\n\n # Load config file\n cfg = load_yaml_config(cfg_path)\n\n # Load pretrained weights.\n model = get_model(model_name, cfg)\n\n state_dict = torch.load(model_file_path)\n model.load_state_dict(state_dict)\n return model", "def load_model(path, epoch, device=None):\n assert path is not None and path != ''\n\n model_name = 'model_{}.pth'.format(epoch)\n model_path = os.path.join(path, model_name)\n assert os.path.exists(model_path), 'model at {} does not exist'.format(\n model_path)\n\n if str(device) == 'cpu':\n loaded_dict = torch.load(\n model_path, map_location=lambda storage, loc: storage)\n else:\n loaded_dict = torch.load(model_path)\n\n model = loaded_dict['model']\n model.net = model.net.module\n model.to(device)\n\n return model, loaded_dict['optimizer_state_dict']", "def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_hf = kwargs.pop(\"from_hf\", False)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n default_gpu = kwargs.pop(\"default_gpu\", True)\n\n # Load config\n assert config is not None\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n logger.error(\"Couldn't reach server at '{}' to download pretrained weights.\".format(archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path, \", \".join(cls.pretrained_model_archive_map.keys()), archive_file)\n )\n return None\n if default_gpu:\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Rename Bert parameters for our framework\n # NB: Assume 1 Bert layer is mapped to 1 layer only (cannot be used to init multiple layers)\n old_keys = []\n new_keys = []\n nums = []\n for key in state_dict.keys():\n new_key = None\n if \".layer.\" in key and from_hf:\n num = int(key.split(\".layer.\")[-1].split(\".\")[0])\n if \".attention.\" in key:\n new_key = key.replace(\".layer.%d.attention.\" % num,\n \".layer.%d.attention_\" % config.bert_layer2attn_sublayer.get(str(num), num))\n elif \".intermediate.\" in key:\n new_key = key.replace(\".layer.%d.intermediate.\" % num,\n \".layer.%d.intermediate.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n elif \".output.\" in key:\n new_key = key.replace(\".layer.%d.output.\" % num,\n \".layer.%d.output.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n nums.append(num)\n for old_key, new_key, _ in sorted(zip(old_keys, new_keys, nums), key=lambda x: x[2], reverse=True):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Load from a PyTorch state_dict\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n logger.info(start_prefix)\n load(model_to_load, prefix=start_prefix)\n if 
len(missing_keys) > 0 and default_gpu:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys)\n )\n if len(unexpected_keys) > 0 and default_gpu:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(model.__class__.__name__, unexpected_keys)\n )\n if len(error_msgs) > 0 and default_gpu:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(model.__class__.__name__, \"\\n\\t\".join(error_msgs))\n )\n\n if hasattr(model, \"tie_weights\"):\n model.tie_weights() # make sure word embedding weights are still tied\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"error_msgs\": error_msgs,\n }\n return model, loading_info\n\n return model", "def load_model(self):\n pass", "def load_pretrained_model(\n init_param: str,\n model: torch.nn.Module,\n ignore_init_mismatch: bool,\n map_location: str = \"cpu\",\n):\n sps = init_param.split(\":\", 4)\n if len(sps) == 4:\n path, src_key, dst_key, excludes = sps\n elif len(sps) == 3:\n path, src_key, dst_key = sps\n excludes = None\n elif len(sps) == 2:\n path, src_key = sps\n dst_key, excludes = None, None\n else:\n (path,) = sps\n src_key, dst_key, excludes = None, None, None\n if src_key == \"\":\n src_key = None\n if dst_key == \"\":\n dst_key = None\n\n if dst_key is None:\n obj = model\n else:\n\n def get_attr(obj: Any, key: str):\n \"\"\"Get an nested attribute.\n\n >>> class A(torch.nn.Module):\n ... def __init__(self):\n ... super().__init__()\n ... self.linear = torch.nn.Linear(10, 10)\n >>> a = A()\n >>> assert A.linear.weight is get_attr(A, 'linear.weight')\n\n \"\"\"\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj\n\n obj = get_attr(model, dst_key)\n\n src_state = torch.load(path, map_location=map_location)\n if excludes is not None:\n for e in excludes.split(\",\"):\n src_state = {k: v for k, v in src_state.items() if not k.startswith(e)}\n\n if src_key is not None:\n src_state = {\n k[len(src_key) + 1 :]: v\n for k, v in src_state.items()\n if k.startswith(src_key)\n }\n\n dst_state = obj.state_dict()\n if ignore_init_mismatch:\n src_state = filter_state_dict(dst_state, src_state)\n dst_state.update(src_state)\n obj.load_state_dict(dst_state)", "def load_model(model: nn.Module, model_args: dict, model_weights: str, device: torch.device):\n model = model(**model_args)\n state_dict = torch.load(model_weights, map_location=device)\n model.load_state_dict(state_dict[\"model\"])\n return model", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n 
print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def custom_model():\n\t# initialize the model\n\t# load weights from path\n\t# returns model\n\tmodel = mlp.get_training_model()\n\tmodel.load_state_dict(torch.load(\"model_wt.pth\"))\n\treturn model", "def load_model(save_dir, gpu_id):\n args_save_name = os.path.join(save_dir, 'args.pt')\n args = torch.load(args_save_name)\n\n # initialize path variables, logging, and database, and the GPU/ CPU to use\n #device = torch.device(f\"cuda:{gpu_id}\" if (torch.cuda.is_available() and gpu_id != -1) else \"cpu\")\n device = torch.device(\"cpu\")\n\n # generate intent class dict\n intent_list_save_name = os.path.join(save_dir, 'intent_list.pt')\n intent_list = torch.load(intent_list_save_name)\n\n # generate the deep intent model\n #if args.encoder == 'distil-m-bert-fine-tuned':\n # model = MyDistilBertForClassification('/data/distilmbert_finetuned/', intent_list, args)\n model = SentenceClassificationModel(intent_list, args)\n if args.multi_gpu and torch.cuda.device_count() > 1:\n print(\"Use\", torch.cuda.device_count(), \"GPUs\")\n model = torch.nn.DataParallel(model)\n\n # load pretrained model\n print(f'loading model {save_dir}')\n model_save_name = os.path.join(save_dir, 'model.pt')\n model.load_state_dict(torch.load(model_save_name))\n model.to(device)\n\n return model", "def load_pretrainedmodels(model_name):\n\n # Lazy import as torchvision may not be required.\n import pretrainedmodels\n\n model = getattr(pretrainedmodels, model_name)().float().eval()\n input_shape = [1, *model.input_size]\n input_data = torch.rand(input_shape).float() * 256\n for channel in range(3):\n input_data[:, channel] -= model.mean[channel]\n input_data[:, channel] /= model.std[channel]\n return model, [input_data]", "def load_trained_net(mal):\n model_root = os.path.join(os.getcwd(), 'data', 'models')\n model = load_model(os.path.join(model_root, 'model_' + mal + '.h5'))\n\n return model", "def load_model():\r\n global model # 下面的那个predict也是要用的 所以在这里定义为全局\r\n model = DenseNet(n_input_channels=1, num_init_features=64,\r\n growth_rate=32,\r\n block_config=(3, 6, 12, 8), num_classes=4).to(device)\r\n model.load_state_dict(torch.load(\"./model29.pkl\"))\r\n model.eval()", "def load_model():\n with open(paths.model('model.pkl'), 'rb') as stream:\n return pickle.load(stream)", "def load_model(model_dir):\n if not osp.exists(model_dir):\n logging.error(\"model_dir '{}' does not exists!\".format(model_dir))\n if not osp.exists(osp.join(model_dir, \"model.yml\")):\n raise Exception(\"There's no model.yml in {}\".format(model_dir))\n with open(osp.join(model_dir, \"model.yml\")) as f:\n model_info = yaml.load(f.read(), Loader=yaml.Loader)\n f.close()\n\n version = model_info['version']\n if int(version.split('.')[0]) < 2:\n raise Exception(\n 'Current version is {}, a model trained by PaddleX={} cannot be load.'.\n format(paddlex.__version__, version))\n\n status = model_info['status']\n\n if not 
hasattr(paddlex.cv.models, model_info['Model']):\n raise Exception(\"There's no attribute {} in paddlex.cv.models\".format(\n model_info['Model']))\n if 'model_name' in model_info['_init_params']:\n del model_info['_init_params']['model_name']\n\n with paddle.utils.unique_name.guard():\n model = getattr(paddlex.cv.models, model_info['Model'])(\n **model_info['_init_params'])\n\n if 'Transforms' in model_info:\n model.test_transforms = build_transforms(model_info['Transforms'])\n\n if '_Attributes' in model_info:\n for k, v in model_info['_Attributes'].items():\n if k in model.__dict__:\n model.__dict__[k] = v\n\n if status == 'Pruned' or osp.exists(osp.join(model_dir, \"prune.yml\")):\n with open(osp.join(model_dir, \"prune.yml\")) as f:\n pruning_info = yaml.load(f.read(), Loader=yaml.Loader)\n inputs = pruning_info['pruner_inputs']\n if model.model_type == 'detector':\n inputs = [{\n k: paddle.to_tensor(v)\n for k, v in inputs.items()\n }]\n model.pruner = getattr(paddleslim, pruning_info['pruner'])(\n model.net, inputs=inputs)\n model.pruning_ratios = pruning_info['pruning_ratios']\n model.pruner.prune_vars(\n ratios=model.pruning_ratios,\n axis=paddleslim.dygraph.prune.filter_pruner.FILTER_DIM)\n\n if status == 'Quantized':\n with open(osp.join(model_dir, \"quant.yml\")) as f:\n quant_info = yaml.load(f.read(), Loader=yaml.Loader)\n quant_config = quant_info['quant_config']\n model.quantizer = paddleslim.QAT(quant_config)\n model.quantizer.quantize(model.net)\n\n if status == 'Infer':\n if model_info['Model'] in ['FasterRCNN', 'MaskRCNN']:\n net_state_dict = load_rcnn_inference_model(model_dir)\n else:\n net_state_dict = paddle.load(osp.join(model_dir, 'model'))\n else:\n net_state_dict = paddle.load(osp.join(model_dir, 'model.pdparams'))\n model.net.set_state_dict(net_state_dict)\n\n logging.info(\"Model[{}] loaded.\".format(model_info['Model']))\n model.status = status\n return model", "def load_model():\n prepro = Prepro(PATH_STOPSWORD, PATH_ACRONYM)\n vectorizer = joblib.load(PATH_TFIDF)\n label_encoder = joblib.load(PATH_ENCODER)\n model_svm = joblib.load(PATH_SVM)\n model_nb = joblib.load(PATH_NB)\n model_lr = joblib.load(PATH_LR)\n return prepro, vectorizer, label_encoder, model_svm, model_nb, model_lr", "def load_torchvision(model_name):\n # Lazy import as torchvision may not be required.\n import torchvision\n\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n\n if model_name.startswith(\"googlenet\"):\n model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)\n else:\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]", "def load_openai_pretrained_model(model, cfg, n_special, dir):\n n_ctx = cfg.n_ctx\n n_embd = cfg.n_embd\n n_transfer = cfg.n_layer\n # Load weights from TF model\n print(\"Loading weights...\")\n names = json.load(open(dir + 'parameters_names.json'))\n shapes = json.load(open(dir + 'params_shapes.json'))\n offsets = np.cumsum([np.prod(shape) for shape in shapes])\n init_params = [np.load(dir + 'params_{}.npy'.format(n)) for n in range(10)]\n init_params = 
np.split(np.concatenate(init_params, 0), offsets)[:-1]\n init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]\n if n_ctx > 0:\n init_params[0] = init_params[0][:n_ctx]\n if n_special > 0:\n init_params[0] = np.concatenate(\n [init_params[1],\n (np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),\n init_params[0]\n ], 0)\n else:\n init_params[0] = np.concatenate(\n [init_params[1],\n init_params[0]\n ], 0)\n del init_params[1]\n if n_transfer == -1:\n n_transfer = 0\n else:\n n_transfer = 1 + n_transfer * 12\n init_params = [arr.squeeze() for arr in init_params]\n\n try:\n assert model.embed.weight.shape == init_params[0].shape\n except AssertionError as e:\n e.args += (model.embed.weight.shape, init_params[0].shape)\n raise\n\n model.embed.weight.data = torch.from_numpy(init_params[0])\n\n for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):\n name = name[6:] # skip \"model/\"\n assert name[-2:] == \":0\"\n name = name[:-2]\n name = name.split('/')\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+\\d+', m_name):\n l = re.split(r'(\\d+)', m_name)\n else:\n l = [m_name]\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n try:\n assert pointer.shape == ip.shape\n except AssertionError as e:\n e.args += (pointer.shape, ip.shape)\n raise\n pointer.data = torch.from_numpy(ip)", "def load_trained_model(filename = 'pricing_model.p'):\n # with ZipFile(\"model.zip\",\"r\") as w:\n # w.extractall()\n \n with open(filename, 'rb') as model:\n pricingmodel = pickle.load(model)\n \n # pricingmodel.Model_made = tf.keras.models.load_model(\"Model_made.h5\")\n # pricingmodel.Model_claim = tf.keras.models.load_model(\"Model_claim.h5\")\n \n \n return pricingmodel", "def load_model(model_name):\n model_def_path = os.path.join(MODEL_DIR, model_name + \".py\")\n weights_path = os.path.join(MODEL_DIR, model_name + \".pth\")\n if six.PY3:\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(model_name,\n model_def_path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n import importlib\n dirname = os.path.dirname(model_def_path)\n sys.path.insert(0, dirname)\n module_name = os.path.splitext(os.path.basename(model_def_path))[0]\n mod = importlib.import_module(module_name)\n func = getattr(mod, model_name)\n net = func(weights_path=weights_path)\n net = modify_to_return_embeddings(net, model_name)\n return net", "def load_pretrained_weights(model, model_name, load_fc=True):\n state_dict = torch.load(url_map[model_name])\n if load_fc:\n model.load_state_dict(state_dict)\n else:\n state_dict.pop('_fc.weight')\n state_dict.pop('_fc.bias')\n res = model.load_state_dict(state_dict, strict=False)\n assert str(res.missing_keys) == str(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'\n print('Loaded pretrained weights for {}'.format(model_name))", "def load_model(self, filename):\n event = teca_time_py_event('teca_deeplab_ar_detect::load_model')\n\n # this creates OpenMP thread pools and imports torch\n # it must be called *before* we import torch\n self.initialize()\n\n # import our torch codes only now that torch has been initialized\n global teca_deeplab_ar_detect_internals\n from teca_deeplab_ar_detect_internals \\\n import teca_deeplab_ar_detect_internals\n\n # create an instance of the model\n model = teca_deeplab_ar_detect_internals.DeepLabv3_plus(\n n_classes=1, _print=False)\n\n # load model weights from state on disk\n 
super().load_model(filename, model)", "def load_model(self):\n with open(self.args.trained_model, 'rb') as handle:\n self.model_hash = hashlib.sha224(handle.read()).hexdigest()\n\n self.model.load(self.args.trained_model)\n self.logger.debug('Loaded model from %s', self.args.trained_model)\n return", "def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)", "def load_pretrained_rnn(model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n rnn = RNN_model(model_fn = RNN_model.set_model_from_file,\n model_dir = model_dir,\n **rnn_params)\n\n # Compile model\n rnn.model_fn()\n\n return rnn", "def load_model(model_path: str, tokenizer: SimpleTokenizer): \n VOCAB_SIZE = len(tokenizer.token2idx)\n\n model = nn.Sequential(\n nn.Embedding(VOCAB_SIZE, EMBEDDING_SIZE, padding_idx=tokenizer.pad),\n nn.Flatten(),\n nn.Linear(EMBEDDING_SIZE * MAX_LENGTH, 10),\n nn.ReLU(),\n nn.Linear(10, 1),\n nn.Sigmoid()\n )\n\n model.load_state_dict(torch.load(model_path))\n return model", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load_pretrained_model(self, model_path):\n # My eyes and my heart both hurt when writing this method\n\n # Only care about layer_types that have trainable parameters\n ltypes = ['BNData', 'ConvolutionData', 'HoleConvolutionData']\n\n def _get_layer_params(layer, ltype):\n\n if ltype == 'BNData': \n n_channels = layer.blobs[0].shape.dim[1]\n gamma = np.array(layer.blobs[0].data).reshape(n_channels)\n beta = np.array(layer.blobs[1].data).reshape(n_channels)\n mean = np.array(layer.blobs[2].data).reshape(n_channels)\n var = np.array(layer.blobs[3].data).reshape(n_channels)\n return [mean, var, gamma, beta]\n\n elif ltype in ['ConvolutionData', 'HoleConvolutionData']:\n is_bias = layer.convolution_param.bias_term\n shape = [int(d) for d in layer.blobs[0].shape.dim]\n weights = np.array(layer.blobs[0].data).reshape(shape)\n bias = []\n if is_bias:\n bias = np.array(layer.blobs[1].data).reshape(shape[0])\n return [weights, bias]\n \n elif ltype == 'InnerProduct':\n raise Exception(\"Fully connected layers {}, not supported\".format(ltype))\n\n else:\n raise Exception(\"Unkown layer type {}\".format(ltype))\n\n\n net = caffe_pb2.NetParameter()\n with open(model_path, 'rb') as model_file:\n net.MergeFromString(model_file.read())\n\n # dict formatted as -> key:<layer_name> :: value:<layer_type>\n layer_types = {}\n # dict formatted as -> key:<layer_name> :: value:[<list_of_params>]\n layer_params = {}\n\n for l in net.layer:\n lname = l.name\n ltype = l.type\n if ltype in ltypes:\n print(\"Processing layer {}\".format(lname))\n layer_types[lname] = ltype\n layer_params[lname] = _get_layer_params(l, ltype)\n\n # Set affine=False for all batchnorm modules\n def _no_affine_bn(module=None):\n if isinstance(module, nn.BatchNorm2d):\n module.affine = False\n\n if len([m for m in module.children()]) > 0:\n 
for child in module.children():\n _no_affine_bn(child)\n\n #_no_affine_bn(self)\n\n\n def _transfer_conv(layer_name, module):\n weights, bias = layer_params[layer_name]\n w_shape = np.array(module.weight.size())\n \n np.testing.assert_array_equal(weights.shape, w_shape)\n print(\"CONV: Original {} and trans weights {}\".format(w_shape,\n weights.shape))\n module.weight.data = torch.from_numpy(weights)\n\n if len(bias) != 0:\n b_shape = np.array(module.bias.size())\n np.testing.assert_array_equal(bias.shape, b_shape)\n print(\"CONV: Original {} and trans bias {}\".format(b_shape,\n bias.shape))\n module.bias.data = torch.from_numpy(bias)\n\n\n def _transfer_conv_bn(conv_layer_name, mother_module):\n conv_module = mother_module[0]\n bn_module = mother_module[1]\n \n _transfer_conv(conv_layer_name, conv_module)\n \n mean, var, gamma, beta = layer_params[conv_layer_name+'/bn']\n print(\"BN: Original {} and trans weights {}\".format(bn_module.running_mean.size(),\n mean.shape))\n bn_module.running_mean = torch.from_numpy(mean)\n bn_module.running_var = torch.from_numpy(var)\n bn_module.weight.data = torch.from_numpy(gamma)\n bn_module.bias.data = torch.from_numpy(beta)\n\n\n def _transfer_residual(prefix, block):\n block_module, n_layers = block[0], block[1]\n\n bottleneck = block_module.layers[0]\n bottleneck_conv_bn_dic = {prefix + '_1_1x1_reduce': bottleneck.cbr1.cbr_unit,\n prefix + '_1_3x3': bottleneck.cbr2.cbr_unit,\n prefix + '_1_1x1_proj': bottleneck.cb4.cb_unit,\n prefix + '_1_1x1_increase': bottleneck.cb3.cb_unit,}\n\n for k, v in bottleneck_conv_bn_dic.items():\n _transfer_conv_bn(k, v)\n\n for layer_idx in range(2, n_layers+1):\n residual_layer = block_module.layers[layer_idx-1]\n residual_conv_bn_dic = {'_'.join(map(str, [prefix, layer_idx, '1x1_reduce'])): residual_layer.cbr1.cbr_unit,\n '_'.join(map(str, [prefix, layer_idx, '3x3'])): residual_layer.cbr2.cbr_unit,\n '_'.join(map(str, [prefix, layer_idx, '1x1_increase'])): residual_layer.cb3.cb_unit,} \n \n for k, v in residual_conv_bn_dic.items():\n _transfer_conv_bn(k, v)\n\n\n convbn_layer_mapping = {'conv1_1_3x3_s2': self.convbnrelu1_1.cbr_unit,\n 'conv1_2_3x3': self.convbnrelu1_2.cbr_unit,\n 'conv1_3_3x3': self.convbnrelu1_3.cbr_unit,\n 'conv5_3_pool6_conv': self.pyramid_pooling.paths[0].cbr_unit, \n 'conv5_3_pool3_conv': self.pyramid_pooling.paths[1].cbr_unit,\n 'conv5_3_pool2_conv': self.pyramid_pooling.paths[2].cbr_unit,\n 'conv5_3_pool1_conv': self.pyramid_pooling.paths[3].cbr_unit,\n 'conv5_4': self.cbr_final.cbr_unit,}\n\n residual_layers = {'conv2': [self.res_block2, self.block_config[0]],\n 'conv3': [self.res_block3, self.block_config[1]],\n 'conv4': [self.res_block4, self.block_config[2]],\n 'conv5': [self.res_block5, self.block_config[3]],}\n\n # Transfer weights for all non-residual conv+bn layers\n for k, v in convbn_layer_mapping.items():\n _transfer_conv_bn(k, v)\n\n # Transfer weights for final non-bn conv layer\n _transfer_conv('conv6', self.classification)\n\n # Transfer weights for all residual layers\n for k, v in residual_layers.items():\n _transfer_residual(k, v)", "def load_model(net,\n file_path,\n ignore_extra=True):\n import torch\n\n if ignore_extra:\n pretrained_state = torch.load(file_path)\n model_dict = net.state_dict()\n pretrained_state = {k: v for k, v in pretrained_state.items() if k in model_dict}\n net.load_state_dict(pretrained_state)\n else:\n net.load_state_dict(torch.load(file_path))", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not 
os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def load_model(self):\n if torch.cuda.is_available():\n map_location=lambda storage, loc: storage.cuda()\n else:\n map_location='cpu'\n\n for index, agent in enumerate(self.agents):\n agent.actor_local.load_state_dict(torch.load('agent{}_checkpoint_actor.pth'.format(index + 1), map_location=map_location))\n agent.critic_local.load_state_dict(torch.load('agent{}_checkpoint_critic.pth'.format(index + 1), map_location=map_location))", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_model(self):\n self.opt.load_weights_folder = os.path.expanduser(self.opt.load_weights_folder)\n\n assert os.path.isdir(self.opt.load_weights_folder), \\\n \"Cannot find folder {}\".format(self.opt.load_weights_folder)\n print(\"loading model from folder {}\".format(self.opt.load_weights_folder))\n\n for model_name in [\"encoder\", \"decoder\"]:\n print(\"Loading {} weights...\".format(model_name))\n path = os.path.join(self.opt.load_weights_folder, \"{}.pth\".format(model_name))\n model_dict = self.encoder.state_dict() if model_name == \"encoder\" else self.decoder.state_dict()\n pretrained_dict = torch.load(path)\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n if model_name == \"encoder\":\n self.encoder.load_state_dict(model_dict)\n else:\n self.decoder.load_state_dict(model_dict)\n\n # loading adam state\n optimizer_load_path = os.path.join(self.opt.load_weights_folder, \"adam.pth\")\n if os.path.isfile(optimizer_load_path):\n print(\"Loading Adam weights\")\n optimizer_dict = torch.load(optimizer_load_path)\n self.optimizer.load_state_dict(optimizer_dict)\n else:\n print(\"Cannot find Adam weights so Adam is randomly initialized\")", "def load_model(self):\n if os.path.exists(self.model_filename):\n self.model.load_weights(self.model_filename)", "def load_pretrained_AlexNet(model, progress=True):\n# def alexnet(pretrained=False, progress=True, **kwargs):\n\n\t__all_ = [\"AlexNet\", \"alexnet\", \"Alexnet\"]\n\n\tmodel_url = {\n\t 'alexnet':'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',\n\t}\n\n\tprint(\"loading pre-trained AlexNet...\")\n\tstate_dict = load_state_dict_from_url(model_url['alexnet'], progress=progress)\n\tmodel_dict = model.state_dict()\n\n\t# filter out unmatching dictionary\n\t# reference: 
https://github.com/SSARCandy/DeepCORAL/blob/master/main.py\n\tstate_dict = {k: v for k, v in state_dict.items() if k in model_dict}\n\n\tmodel_dict.update(state_dict)\n\tmodel.load_state_dict(state_dict)\n\tprint(\"loaded model correctly...\")", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def loadModel(name, path=None):\n\n # if a path is given, try to load from that path first\n if path:\n try:\n model = TFT5ForConditionalGeneration.from_pretrained(path)\n tokenizer = T5Tokenizer.from_pretrained(path)\n \n return model, tokenizer\n except:\n print(f\"WARNING: Could not load the model from the path ({path}) specified with --from-pretrained flag. Trying to load '{name}' from cloud instead.\")\n\n # if no path was specified, or the load from path failed, try to load from cloud using the given model name\n model = TFT5ForConditionalGeneration.from_pretrained(name)\n tokenizer = T5Tokenizer.from_pretrained(name)\n \n return model, tokenizer", "def load(self, model_name_or_path):\n return BertMLM(model_name_or_path, self.top_k)", "def init_pretrained_weights(model, key=''):\n import os\n import errno\n import gdown\n from collections import OrderedDict\n\n def _get_torch_home():\n ENV_TORCH_HOME = 'TORCH_HOME'\n ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'\n DEFAULT_CACHE_DIR = '~/.cache'\n torch_home = os.path.expanduser(\n os.getenv(\n ENV_TORCH_HOME,\n os.path.join(\n os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'\n )\n )\n )\n return torch_home\n\n torch_home = _get_torch_home()\n model_dir = os.path.join(torch_home, 'checkpoints')\n try:\n os.makedirs(model_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # Directory already exists, ignore.\n pass\n else:\n # Unexpected OSError, re-raise.\n raise\n filename = key + '_imagenet.pth'\n cached_file = os.path.join(model_dir, filename)\n\n if not os.path.exists(cached_file):\n gdown.download(pretrained_urls[key], cached_file, quiet=False)\n\n state_dict = torch.load(cached_file)\n model_dict = model.state_dict()\n new_state_dict = OrderedDict()\n matched_layers, discarded_layers = [], []\n\n for k, v in state_dict.items():\n if k.startswith('module.'):\n k = k[7:] # discard module.\n\n if k in model_dict and model_dict[k].size() == v.size():\n new_state_dict[k] = v\n matched_layers.append(k)\n else:\n discarded_layers.append(k)\n\n model_dict.update(new_state_dict)\n model.load_state_dict(model_dict)\n\n if len(matched_layers) == 0:\n warnings.warn(\n 'The pretrained weights from \"{}\" cannot be loaded, '\n 'please check the key names manually '\n '(** ignored and continue **)'.format(cached_file)\n )\n else:\n print(\n 'Successfully loaded imagenet pretrained weights from \"{}\"'.\n format(cached_file)\n )\n if len(discarded_layers) > 0:\n print(\n '** The following layers are discarded '\n 'due to unmatched keys or layer size: {}'.\n format(discarded_layers)\n )", "def load_pretrained_net_weights(net, ckpt_path):\n print(\"Loading Model: \", ckpt_path)\n print('')\n\n net.load_weights(ckpt_path).expect_partial()", "def load_model_from_file(model: torch.nn.Module, model_file_path: Path) -> None:\n\n if model_file_path.is_file():\n try:\n 
model.load_state_dict(torch.load(model_file_path))\n except Exception as e:\n logging.warning(\"Couldn't load model. Attempting to map CUDA tensors to CPU to solve error.\")\n else:\n logging.warning(\"Could not find model: {}\".format(model_file_path))\n raise FileExistsError(f\"Cannot load model file {model_file_path} into {model}...\")", "def load_checkpoint(path: str, save_dir: str, cuda: bool = False, attention_viz: bool = False) -> nn.Module:\r\n # Load model and args\r\n state = torch.load(path, map_location=lambda storage, loc: storage)\r\n args, loaded_state_dict = state['args'], state['state_dict']\r\n\r\n # Update args with current args\r\n args.cuda = cuda\r\n args.attention_viz = attention_viz\r\n args.save_dir = save_dir\r\n\r\n model = build_model(args)\r\n model.load_state_dict(loaded_state_dict)\r\n\r\n if cuda:\r\n print('Moving model to cuda')\r\n model = model.cuda()\r\n\r\n return model", "def load_trained_model(log_dir):\n model = create_model()\n\n model.load_weights(os.path.join(log_dir, \"best_model.h5\"))\n return model", "def load_model(self) -> None:\n\n try:\n model_class = MODEL_TYPES[self.model_type]\n except KeyError:\n raise KeyError(f\"model type: {self.model_type} not supported\")\n\n if (\n os.path.exists(self.resources_path)\n and len(os.listdir(self.resources_path)) > 0\n ):\n model_name_or_path = self.resources_path\n else:\n model_name_or_path = self.model_name\n\n if self.model_type == \"stable_diffusion\":\n self.model = model_class.from_pretrained(\n model_name_or_path,\n use_auth_token=self.auth_token,\n )\n else:\n self.model = model_class.from_pretrained(model_name_or_path)\n\n self.model.to(self.device)", "def load_model(model):\n fin = False\n backup1 = False\n backup2 = False\n\n if os.path.exists(\"TrainedModel/finalModel.pth\"):\n fin = True\n elif os.path.exists(\"TrainedModel/modelBackup.pth\"):\n backup1 = True\n elif os.path.exists(\"TrainedModel/modelBackupBackup.pth\"):\n backup2 = True\n\n if fin:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/finalModel.pth\"))\n return model\n except:\n print(\"finalModel seems to be corrupted, trying a backup...\")\n \n if fin or backup1:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackup.pth\"))\n return model\n except:\n print(\"modelBackup seems to be corrupted, trying a backup...\")\n\n if fin or backup1 or backup2:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackupBackup.pth\"))\n return model\n except:\n print(\"modelBackupBackup seems to be corrupted, you're at the end of the line.\")\n\n print(\"There doesn't seem to be anything to load.\")\n return model", "def load_model(self) -> Any:", "def load_pretrained_model(self, load_from, load_partial_graph=False):\n print(\"loading model from %s\\n\" % (load_from))\n try:\n if self.use_cuda:\n pretrained_dict = torch.load(load_from)\n else:\n pretrained_dict = torch.load(load_from, map_location='cpu')\n\n model_dict = self.online_net.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n if load_partial_graph and len(self.load_parameter_keywords) > 0:\n tmp_pretrained_dict = {}\n for k, v in pretrained_dict.items():\n for keyword in self.load_parameter_keywords:\n if keyword in k:\n tmp_pretrained_dict[k] = v\n break\n pretrained_dict = tmp_pretrained_dict\n model_dict.update(pretrained_dict)\n self.online_net.load_state_dict(model_dict)\n print(\"The loaded parameters are:\")\n keys = [key for key in pretrained_dict]\n print(\", \".join(keys))\n 
print(\"--------------------------\")\n except:\n print(\"Failed to load checkpoint...\")", "def load_and_fuse_pretrained_weights(model: torch.nn.Module, opt: argparse.ArgumentParser) -> torch.nn.Module:\n model.optimize_for_inference(fuse_all_layers=False)\n fused_state_dict = load_weights(opt.weights)\n fused_state_dict = map_key_names(fused_state_dict, model.state_dict())\n model.load_state_dict(fused_state_dict)\n return model", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load_model(self):\n # Load the model\n print('Loading model:', self.model_path)\n t0 = time.time()\n model = load_model(self.model_path)\n t1 = time.time()\n print('Loaded in:', t1 - t0)\n return model", "def load_actor(self, checkpoint):\n \n model = torch.load(checkpoint)\n self.actor_local.load_state_dict(model)", "def load_model(name):\n\tmodel = joblib.load(\"data/{}/{}.model\".format(name, name))\n\t# Setting n_jobs to 1 in case it was set to a higher number while training the model seems to makes predictions of single samples much faster.\n\tmodel.n_jobs = 1\n\treturn model" ]
[ "0.82864565", "0.8224309", "0.81519234", "0.7998859", "0.7852287", "0.7793966", "0.7782619", "0.7755613", "0.7727618", "0.7702701", "0.767298", "0.7656771", "0.7558395", "0.7558245", "0.75549746", "0.75289506", "0.7481736", "0.747583", "0.743038", "0.741285", "0.74045056", "0.7383925", "0.73705393", "0.7359493", "0.7335725", "0.72931254", "0.7280269", "0.7267051", "0.72414696", "0.7241164", "0.7235896", "0.7228618", "0.721257", "0.72042453", "0.7184205", "0.715618", "0.7146677", "0.71444833", "0.71290594", "0.71219623", "0.7119306", "0.7104033", "0.7101744", "0.7101424", "0.70998806", "0.70993114", "0.70912874", "0.7084402", "0.7059665", "0.70508385", "0.70403284", "0.7020523", "0.70155233", "0.7015284", "0.70138454", "0.70121264", "0.70103776", "0.70048726", "0.70045686", "0.6995562", "0.6994272", "0.69706416", "0.6968327", "0.696331", "0.6955487", "0.6955373", "0.69540477", "0.6951319", "0.69075876", "0.69061816", "0.6904712", "0.68978316", "0.68933105", "0.68933105", "0.6885028", "0.6883711", "0.6876196", "0.68692684", "0.68623775", "0.68582875", "0.68563116", "0.6851486", "0.6848957", "0.68478906", "0.6834328", "0.6832718", "0.68274635", "0.68267846", "0.6819812", "0.6817586", "0.68101233", "0.68095446", "0.6805617", "0.6805539", "0.68050927", "0.68045163", "0.6802444", "0.68010366", "0.6796725", "0.67941445" ]
0.7447873
18
Given image file predict and return class label
def inference_on_data(image) -> str:\n    result = inference_model(image)\n    class_label = torch.argmax(result[0])\n    # Print to log acts as a proxy of saving to an actual DB\n    print(f'Image Class : {class_label}')\n    return str(class_label)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict_car():\n img = open_image(request.files['image'])\n pred_class, pred_idx, outputs = learn.predict(img)\n return str(pred_class)", "def predict(model, images):\n return model.predict_classes(images)", "def predict(cls, image_path: str) -> tuple:\n\n print(\"Classify input image: \")\n return cls.model.predict(image_path)", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def model_predict(img_path, model_path):\n learn = load_model(model_path)\n img = open_image(img_path)\n # get the outputs from the model\n pred_class,pred_idx,outputs = learn.predict(img)\n # return the classification the model returns\n return pred_class", "def classify(neural_net, image_file):\n\timg = Image.open(image_file)\n\timg.load()\n\timg_array = np.asarray(img)\n\timg_array.shape = (1, 100, 100, 3)\n\n\tprediction = model.predict(img_array)[0][0]\n\treturn prediction", "def get_classification(self, image):\n # Image pre-processing pipeline\n img = cv2.resize(image, None, fx=0.5, fy=0.5)\n img = img.astype(np.float32)\n img = keras.applications.vgg16.preprocess_input(img)\n # Execute prediction\n probs = self.model.predict(np.array([img]), batch_size=1, verbose=1)[0]\n # get label with max probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n label = self.predictionary[g_x]\n rospy.loginfo(\"label: %d, conf: %f, %f, %f, %f\", g_x, probs[0], probs[1], probs[2], probs[3])\n return label", "def predict(self, request):\r\n f = request.files['image']\r\n \r\n img = Image.open(f)\r\n \r\n image = img.convert('RGB')\r\n \r\n image_np = load_image_into_numpy_array(image)\r\n output_dict = run_inference_for_single_image(model, image_np)\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n output_dict['detection_boxes'],\r\n output_dict['detection_classes'],\r\n output_dict['detection_scores'],\r\n category_index,\r\n instance_masks=output_dict.get('detection_masks_reframed', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=2, \r\n min_score_thresh=0.45, \r\n skip_scores=True)\r\n \r\n result_image = Image.fromarray(image_np)\r\n \r\n raw_bytes = BytesIO()\r\n result_image.save(raw_bytes, \"PNG\")\r\n \r\n return base64.b64encode(raw_bytes.getvalue()).decode(\"utf-8\")", "def predict(self, img_path):\n\n img = cv2.imread(img_path)\n img0 = img.copy()\n \n #This happens inside datasets\n # Convert\n img = letterbox(img, new_shape=self.img_size)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n #this happens on detect\n img = torch.from_numpy(img).to(self.device)\n img = img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = self.model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n \n # Process detections\n for i, det in enumerate(pred): # detections per image\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n\n pred = [d.cpu().detach().numpy() for d in pred if d is not None]\n pred = pred[0] if len(pred) else pred\n \n pred = 
[[[x1, y1, x2, y2],conf] for x1, y1, x2, y2, conf, clss in pred]\n\n return pred", "def predict(image_path):\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n plt.imshow(img)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predict_word(word_path):\n word = word_path\n reshaped_word = shape_new_img(word)\n pred = model.predict(reshaped_word)\n get_class = np.argmax(pred)\n prediction = labels_list[get_class]\n return prediction", "def predict_class(self, image_path):\n\n img_array = self.process_image(image_path)\n predictions = self.model.predict(img_array)\n vehicle = self._mappings[np.argmax(abs(predictions))]\n return vehicle", "def classify_image(img_path: str, model=None, pretrained_state_path: str = None):\n if model is None:\n if pretrained_state_path is None:\n model = models.vgg16(pretrained=True)\n else:\n state_dict = torch.load(pretrained_state_path)\n model = models.vgg16()\n model.load_state_dict(state_dict)\n img = preprocess_image(img_path)\n output = model(img)\n # Getting the max of the soft max layer.\n prediction = output.data.numpy().argmax()\n return labels[prediction]", "def predict(self, image) -> tuple:\n if type(image) == bytes: # allow to pass binary image file content\n img = Image.open(BytesIO(image))\n img = img.convert(\"L\").resize(self.input_size) # convert(\"L\") -> grayscale\n else: # otherwise expect filepath\n img = load_img(image, color_mode='grayscale', target_size=self.input_size)\n data = img_to_array(img)/255 # normalize pixel intensity -> [0,1]\n data = data.reshape((1,) + data.shape)\n with self.graph.as_default():\n with self.session.as_default():\n prediction = self.model.predict(data)\n # generate and return the (class, confidence) tuple\n if self.is_binary:\n if prediction[0][0] <= 0.5:\n return (self.classes[0], float(1.0 - prediction[0][0]))\n return (self.classes[1], float(prediction[0][0]))\n return (self.classes[np.argmax(prediction[0])], float(np.max(prediction[0])))", "def predict(self, image_file):\n image = misc.imread(image_file)\n return self.predict_from_ndarray(image)", "def classify_image(img_pil):\n results = tpu.ClassifyWithImage(img_pil, top_k=1)\n if len(results) == 0:\n return None, None\n i, score = results[0]\n label = labels[i]\n # print(label + \": \" + str(score))\n return label, score", "def predict(self, img):\n return self._predict([img])[0]", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def classify_image(image):\n image_path = image.filename\n image_data = np.array(Image.open(image.stream))\n image_data = 
skimage.img_as_float(image_data).astype(np.float2)\n with classifier_lock:\n classification = classifier.predict([image_data])[0]\n return {\"suggested_tags\": predicted_tags(classification),\n \"classification_vector\": classification,\n \"image_url\": image_path}", "def predict(self, image_path, save_vis=False, save_dir=None):\n print(image_path)\n image = cv2.imread(image_path)\n results = self.model.detect([image], verbose=0)\n r = results[0]\n image_id=os.path.split(image_path)[1][0:-4]\n if save_vis:\n class_names = ['Bench', 'Billboard', 'Catch Basin', 'CCTV Camera', 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole', 'Phone Booth', 'Street Light', 'Pole', 'Traffic Sign Frame', 'Utility Pole', 'Traffic Light', 'Traffic Sign (Back)', 'Traffic Sign (Front)', 'Trash Can']\n visualize.save_image(image = image[:,:,::-1], image_name=image_id, boxes=r['rois'], masks=r['masks'], class_ids=r['class_ids'], class_names=class_names, scores=r['scores'], save_dir=save_dir)\n features = {'image_id': image_id, 'classes': r['class_ids'].tolist(), 'boxes': r['rois'].tolist()}\n return features, r['masks']", "def predict(model, input_file):\n if input_file.endswith(\".json\"):\n with open(input_file,\"w\") as fd:\n data = json.loads(input_file)\n else:\n data = imread(input_file)\n result = model.predict(data)\n print(\"Model predicted class: %s\"%result)\n return result", "def classify(img, c_model):\n #global class_graph\n\n #img = load_img(im_path,target_size=(input_height, input_width))\n #img = img_to_array(img)\n im_size = 128\n # resize \n\n img = cv2.resize(img, (im_size,im_size))\n\n img = img.astype(\"float\") / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n\n return predictions", "def predict_class(self, original_image_numpy: np.ndarray) -> None:\n from app.dl_model.image import ClassifierInput\n # scale up coordinates\n self.scale_up_coordinates()\n x1, y1, x2, y2 = [int(coord) for coord in self.scale_coordinates.round()]\n # crop original numpy image\n numpy_image = original_image_numpy[y1:y2, x1:x2, :].copy()\n # create classifier input object\n classifier_input = ClassifierInput(numpy_image, new_shape=(224, 224))\n # classify input\n prediction = classifier_input.predict_class()\n # set attributes\n self.class_name = prediction.class_name # update class_name\n self.conf = prediction.conf # update probability\n self.product_id = prediction.product_id # set product external id\n self.detection_index = prediction.detection_index # set detection index\n self.top_k_names = prediction.top_k_names # set top k names list\n self.top_k_indices = prediction.top_k_indices # set top k detection index\n self.top_k_confidences = prediction.top_k_confidences # set top k confidieces values\n self.top_k_product_ids = prediction.top_k_product_ids # set top k product external ids", "def main(image, model_dir):\n model_file, signature = get_model_and_sig(model_dir)\n interpreter = load_model(model_dir + model_file)\n prediction = get_prediction(image, interpreter, signature)\n # get list of confidences from prediction\n confidences = list(prediction.values())[0]\n # get the label name for the predicted class\n labels = signature.get(\"classes\").get(\"Label\")\n max_confidence = max(confidences)\n prediction[\"Prediction\"] = labels[confidences.index(max_confidence)]\n return prediction", "def predict_picture_labels(picture):\n image_url = picture.image.url\n response = requests.post(PREDICT_ENDPOINT, json={\n 'image_url': image_url,\n 
})\n data = response.json()\n # Get the best matching labels\n right_labels = data['right_probabilities']\n right_label_id, right_label_prob = max(right_labels.items(), key=lambda i: i[1])\n left_labels = data['left_probabilities']\n left_label_id, left_label_prob = max(left_labels.items(), key=lambda i: i[1])\n # Save the labels\n picture.recognized_left_label_id = left_label_id\n picture.recognized_left_probability = left_label_prob\n picture.recognized_right_label_id = right_label_id\n picture.recognized_right_probability = right_label_prob\n picture.save()\n return picture", "def predictImage(model, image):\n # Reshape\n x = image[np.newaxis, ::]\n\n # Standardise range\n x = x.astype(np.float32) / 255.\n\n # Prediction\n preds = model.predict(x)[0].reshape(image.shape[0],\n image.shape[0],\n model.layers[-1].output_shape[-1])\n # class_img\n class_img = np.argmax(preds, axis=-1)\n\n return (preds, class_img)", "def predict(self, image_to_predict):\n\n y_pred = self.classifier.predict(image_to_predict)\n\n return y_pred", "def get_classification(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (360,270), interpolation = cv2.INTER_CUBIC)\n\n with self.graph.as_default():\n result = self.model.predict(image[None, :, :, :], batch_size=1).squeeze()\n id = np.argmax(result)\n return id, result[id]", "def predict_category(img_path):\n tensor = path_to_tensor(img_path)\n # WE need to send a tensor to find the bottelneck feature so cnverting the image to a tensor\n\n\n prediction = model_final.predict(tensor)\n\n return np.argmax(prediction)", "def predict_one_image(img_path, prediction_model):\n # Load image and resize it\n img = image.load_img(img_path, target_size=(224, 224))\n # Transform it in array\n x = image.img_to_array(img)\n # Expand array dimension\n x = np.expand_dims(x, axis=0)\n # Make prediction\n prediction_score = prediction_model.predict(x)\n return prediction_score", "def predict(image_path):\n global graph\n with graph.as_default():\n image_size = (299, 299)\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]", "def classify(\n self, image_path: Optional[str] = None, image_data: Optional[bytes] = None\n ):\n\n tensor = None\n if image_path:\n tensor = self.__load_image(image_path=image_path)\n elif image_data:\n tensor = self.__load_image_from_bytes(image_data=image_data)\n if tensor is None:\n raise Exception(\"Please provide image path or data of your image!\")\n\n output = self.model(self.__batch_data(tensor))\n predicted = torch.argmax(output)\n classes = constant.classes\n prediction_class = classes[int(predicted.item())]\n return prediction_class", "def predict(self, datafile):", "def student_model_predict_label_h5(img_path):\n proc_tensor = path_to_tensor(img_path)\n return probs_for_breeds_h5(test_student_model.predict(proc_tensor))", "def predict(self, sess, img_data):\n\n with sess.as_default():\n new_image = self.preprocess(img_data, self.input_shape)\n input_feed = self.create_input_feed(sess, new_image, img_data)\n output_fetch = self.create_output_fetch(sess)\n all_classes, all_scores, all_bboxes = sess.run(output_fetch, input_feed)\n\n return all_classes, all_scores, all_bboxes", "def get_classification(self, image):\n # Image pre-processing pipleine\n img = np.float32(image)\n 
img = preprocess_input(img)\n img = cv2.resize(img, (299, 299))\n img = np.expand_dims(img, 0)\n # Execute model's predictions - return probability value for each of 4 classes\n probs = self.model.predict(img)[0]\n # get class with max probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident about the prediction\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n # Swap label values as model was trained with different label values\n if g_x == 2:\n prediction = 0 # Red\n elif g_x == 0:\n prediction = 2 # Green\n elif g_x == 3:\n prediction = 1 # Yellow\n else:\n prediction = 3 # No light\n\n # Log the message\n rospy.loginfo(\"The label returned is %d\", prediction)\n\n # Return the light state corresponding to the index\n return prediction", "def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label", "def predict(self, image, resize_to=MNIST_IMG_SIZE):\n x = self.pre_process(image, resize_to)\n\n # seems Keras has a unclosed bug when dealing with threads\n # https://github.com/fchollet/keras/issues/2397\n with self._graph.as_default():\n label = self._model.predict_classes(x, batch_size=1, verbose=0)\n\n return label[0]", "def get_classification(self, image):\n\n # Convert image to PIL RGB image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # add a fourth batch dimension to array\n image = np.expand_dims(image, axis=0)\n\n ## Predict images class\n if image.shape==(1, self.img_height, self.img_width, self.img_channels):\n y_pred = self.model.predict(image)\n else:\n rospy.logwarn(\"tl_classifier: Wrong image shape: {},{},{},{}\".format(image.shape[0],image.shape[1],image.shape[2],image.shape[3]))\n return TrafficLight.UNKNOWN\n\n # Filter predictions\n confidence_threshold = 0.7\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n\n # Output predicted classes and scores\n #rospy.loginfo(\"tl_classifier: class conf xmin ymin xmax ymax\")\n \n # Filter classes prediction\n tl_pred_classes = y_pred_thresh[0][:,0]\n tl_pred_scores = y_pred_thresh[0][:,1]\n # Find classes that contains tl's\n tl_pred_classes = [cl for cl in tl_pred_classes if 1<=cl<=3]\n\n\n # Test light state (if prediction is not empty)\n if len(tl_pred_classes) > 0:\n if (tl_pred_classes[0]==1):\n tl_return = TrafficLight.GREEN\n rospy.loginfo(\"tl_classifier: Green detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==2):\n tl_return = TrafficLight.YELLOW\n rospy.loginfo(\"tl_classifier: Yellow detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==3):\n tl_return = TrafficLight.RED\n rospy.loginfo(\"tl_classifier: Red detected, score {:.2f}\".format(tl_pred_scores[0]))\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Other class detected!\")\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Unknown detected!\")\n\n\n return tl_return", "def predict(self, images, batch_size):\n pass", "def predict(model, img):\n\tx = 
image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\tpreds = model.predict(x)\n\treturn preds[0]", "def get_classification(self, image):\n if self.model is not None:\n im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n im = im.astype('float32')\n im = preprocess_input(im)\n im_array = np.asarray(im)\n transformed_im_array = im_array[None, :, :, :]\n with self.graph.as_default():\n preds = self.model.predict(transformed_im_array, batch_size=1)\n return np.argmax(preds[0])\n return TrafficLight.UNKNOWN", "def predict():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n return 'No file found'\n user_file = request.files['file']\n if user_file.filename == '':\n return 'file name not found …'\n else:\n path=os.path.join(os.getcwd()+user_file.filename)\n user_file.save(path)\n K.clear_session() \n classes = pred(path)\n K.clear_session() \n \n return jsonify({\n \"status\":\"success\",\n \"prediction\":classes[0],\n \"confidence\":str(classes[1])\n })", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def predict_file(img_path, model):\n return gennet.predict_file(img_path, 'Resnet50', model)", "def get_classification(self, image):\n #TODO implement light color prediction\n predict = TrafficLight.UNKNOWN\n if self.predict is not None:\n # expand image dimensions\n image_expanded = np.expand_dims(image, axis=0)\n # run detection\n (scores, classes, num) = self.tf_session.run(\n [self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_expanded})\n\n # reduce the dimensions\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n # calculate prediction\n cc = classes[0]\n confidence = scores[0]\n \n if cc > 0 and cc <= 4 and confidence is not None and confidence > THRESHOLD:\n predict = self.clabels[cc]\n else:\n predict = TrafficLight.UNKNOWN\n\n if predict == TrafficLight.RED: \n Light_status = 'Red'\n elif predict == TrafficLight.GREEN:\n Light_status = 'Green'\n elif predict == TrafficLight.YELLOW:\n Light_status = 'Yellow'\n else:\n Light_status = 'Unknown'\n print('Light is ',Light_status)\n\n return predict", "def predict_data(img): \n return gennet.predict_data(img, 'Resnet50')", "def get_classification(self, image):\n # Run inference on image\n prediction = None\n prediction = inferOnImage(self.sess, self.model_logits, self.X, image)\n\n # Convert number into label just for debug\n prediction_label = None\n if prediction[0] == 0:\n prediction_label = \"RED\"\n elif prediction[0] == 1:\n prediction_label = \"GREEN\"\n elif prediction[0] == 2:\n prediction_label = \"NOLIGHT\"\n\n # Log the message\n rospy.loginfo(\"The label returned is %s\", prediction_label)\n\n # Return Unknown for now\n return TrafficLight.UNKNOWN", "def predict(self,url):\n\n # get image\n response = requests.get(url)\n \n img = Image.open(BytesIO(response.content))\n\n transform = transforms.Compose([transforms.Grayscale(),\n transforms.Resize((128,128)),\n transforms.ToTensor()])\n\n img = transform(img).unsqueeze(0)\n\n if torch.cuda.is_available(): \n img = img.cuda() \n\n out = self.model(img)\n\n classes = ['Jazzmaster','Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster','Telecaster']\n\n if torch.cuda.is_available():\n\n logs = out.cpu().data.numpy()\n \n else:\n\n logs = out.data.numpy()\n \n return [classes[logs.argmax()]]", "def infer_classes(png_fname):\n # 
1 as speech\n \n from fastai.vision.image import open_image\n classes = model_classes.predict(open_image(png_fname))\n\n return classes", "def predict(image):\n with tf.Session(graph=graph) as session:\n saver = tf.train.Saver()\n saver.restore(session, \"saved_models/model12.ckpt\")\n print(\"Model restored.\")\n feed_dict = {tf_sample_dataset : image}\n predictions = session.run(train_prediction, feed_dict=feed_dict)\n # Prints an array of softmax probabilities for each digit in the number\n print str(predictions)\n return np.argmax(predictions, 2)", "def predict_classification_net(X_test, image_name):\n\t# Load training data mean \n\tmeans = np.load(PATH + 'Datasets/means_classification.npy')\n\t# Zero center\n\tX_test -= means\n\t# Create model\n\tmodel = build_classif_net()\n\t# Load weights\n\tmodel.load_weights(PATH + 'Weights/weights_classification_net.hdf5')\n\t# Compile model\n\tmodel.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])\n\t# Predict model\n\tscores = model.predict(X_test)\n\t# Get indexes of the windows labeled as sealions (0 because sealions are [1 0])\n\tprediction = np.argmax(scores, axis=1)\n\n\tnp.save(PATH + 'Results/classification_'+ image_name + '.npy', prediction)\n\treturn prediction", "def score(filepath, filename, model):\n # Pillow library is used since we open a new file that wasn't in our test folder\n img = Image.open(join(filepath, filename))\n img = img.resize(fixed_size)\n img = np.array(img)\n img = img / 255.0\n img = img.reshape(1, fixed_size[0], fixed_size[1], 3)\n p = model.predict(img).tolist()[0]\n print(p)\n result = {'label': train_labels[p.index(max(p))], 'confidence': max(p)}\n return result", "def get_classification(self, image):\n # Detect bounding boxes\n box_coords, _ = self.tld.predict(image)\n \n if len(box_coords) == 0:\n rospy.loginfo('No boxes detected')\n return TrafficLight.UNKNOWN\n \n # Identify light state\n num_detected = [0] * 3 # count how many each light detected in case not all boxes agree\n \n for box in box_coords:\n x1 = int(box[0])\n y1 = int(box[1])\n x2 = int(box[2])\n y2 = int(box[3])\n\n tl_img = image[x1:x2,y1:y2]\n dsize = (15, 30)\n tl_img = cv2.resize(tl_img, dsize)\n\n image_array = np.asarray(tl_img)\n\n with self.graph.as_default():\n labels = self.simulator_model.predict(image_array[None,:,:,:])\n predict = np.argmax(labels)\n \n num_detected[predict] += 1\n \n predict = num_detected.index(max(num_detected))\n rospy.loginfo('Each light detected (%d,%d,%d) times. 
'%(num_detected[0],num_detected[1],num_detected[2]))\n rospy.loginfo('Predicted state: %d.'%predict)\n\n return predict", "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "def get_classification(self, image):\n \n img = cv2.resize(src=image, dsize=(IN_IMAGE_HEIGHT,IN_IMAGE_WIDTH))\n img = img.astype(float)\n img = img / 255.0\n\n img = img[np.newaxis,:,:,:]\n\n with self.graph.as_default():\n predictions = self.model.predict(img)\n predicted_cat = np.argmax(predictions,axis=1)\n\n light = predicted_cat[0]\n# rospy.logwarn(\"Predicted = %i \", light)\n if(light==0):\n return TrafficLight.GREEN\n elif(light==1):\n return TrafficLight.YELLOW\n elif(light==2):\n return TrafficLight.RED\n return TrafficLight.UNKNOWN", "def predict(self, image_or_filename: Union[np.ndarray, str]) -> Tuple[str, float]:\n if isinstance(image_or_filename, str):\n image = util.load_image(image_or_filename)\n else:\n image = image_or_filename\n return self.model.predict(image, batch_size=8)\n # return self.model.predict_on_image(image)", "def predict(self, img):\n logger.info(\"predict() for %s\" %threading.current_thread())\n\n #detect face from the image\n face, rect = self.detect_face(img)\n\n if face is None or rect is None:\n #print(\"No face found for img \", type(img))\n return None, None, None, None\n\n if self.redis_server_password is None:\n # No training data available. Just perform detection and return\n # an error message in the subject value.\n warning = \"Training data not available. 
Redis password not set.\"\n subject = \"No Training Password\" # This will be displayed with the face\n confidence = 0\n logger.warning(\"%s\" %warning)\n return None, subject, confidence, rect\n\n #predict the image using our face recognizer\n label, confidence = self.face_recognizer.predict(face)\n #get name of respective label returned by face recognizer\n label_text = self.face_recognizer.getLabelInfo(label)\n logger.info(\"label=%s label_text=%s\" %(label, label_text))\n\n # print(label_text, confidence, rect)\n return img, label_text, confidence, rect", "def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [self.classes[np.argmax(elem)] for elem in self.data]\n return tags", "def http_classify(self, req):\n \n if len(req.files) != 0:\n img = np.fromstring(req.files['file'].read(), np.uint8)\n else:\n img = np.fromstring(req.data, np.uint8)\n \n img = cv2.imdecode(img, cv2.IMREAD_UNCHANGED)\n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n return self.get_predictions(img)", "def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result", "def predict(input_shape, model, image_path):\n \n # Load and resize the image using PIL.\n img = PIL.Image.open(image_path)\n print('input_shape: ', input_shape)\n img_resized = img.resize(input_shape, PIL.Image.LANCZOS)\n\n # Plot the image.\n plt.imshow(img_resized)\n plt.show()\n\n # Convert the PIL image to a numpy-array with the proper shape.\n img_array = np.expand_dims(np.array(img_resized), axis=0)\n\n # Use the ResNet50 model to make a prediction.\n # This outputs an array with 1000 numbers corresponding to\n # the classes of the ImageNet-dataset.\n pred = model.predict(img_array)\n \n # Decode the output of the ResNet50 model.\n pred_decoded = decode_predictions(pred)[0]\n\n # Print the predictions.\n for code, name, score in pred_decoded:\n print(\"{0:>6.2%} : {1}\".format(score, name))\n \n return", "def predict(model, img, target_size=(229, 229)): #fixed size for InceptionV3 architecture\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return preds[0]", "def predict(frame):\n cv_net = cv2.dnn.readNetFromTensorflow(PATH_TO_MODEL_WEIGHTS, PATH_TO_GRAPH)\n labels = coco_label_reader(PATH_TO_LABELS)\n\n rows, cols, _ = frame.shape\n blob = cv2.dnn.blobFromImage(frame, size=(rows, cols), swapRB=True, crop=False)\n cv_net.setInput(blob)\n cv_out = 
cv_net.forward()\n boxes = []\n classes = []\n for detection in cv_out[0, 0, :, :]:\n score = float(detection[2])\n if score > 0.3:\n left = detection[3] * cols\n top = detection[4] * rows\n right = detection[5] * cols\n bottom = detection[6] * rows\n class_ = int(detection[1])\n if left > right:\n left, right = right, left\n if top > bottom:\n top, bottom = bottom, top\n boxes.append([left, top, right, bottom])\n classes.append(labels[class_])\n return non_max_suppression(np.asarray(boxes), np.asarray(classes))", "def predict_one_image(img_path, clf, labels):\n face_encodings, locs = extract_features(img_path)\n if not face_encodings:\n return None, None\n pred = pd.DataFrame(clf.predict_proba(face_encodings),\n columns = labels)\n pred = pred.loc[:, COLS]\n return pred, locs", "def predict_labels(model):\n test_datagen = ImageDataGenerator(featurewise_center=True,\n featurewise_std_normalization=True\n #rescale=1. / 255,\n #samplewise_center=True,\n #samplewise_std_normalization=True\n )\n test_datagen.fit(test_data)\n # datagen.fit(val_data)\n # create generator for train data\n test_generator = test_datagen.flow(\n test_data,\n batch_size=batch_size,\n shuffle=False)\n pred_prob=model.predict_generator(test_generator,test_data.shape[0])\n pred_prob=pred_prob[:,0]\n def pre_class(x):\n \tif x<0.5:\n return 0\n else:\n return 1\n #def true_label(id):\n #\tif 'f0' in id:\n #\t return 0\n # elif 'f1' in id: \n # return 1\n #\telse:\n #\t pass\n #pred_true=map(true_label,test_id)\n #pred_true=np.array(pred_true)\n #print roc_auc_score(val_target, pred_prob)\n #prediction=map(pre_class,pred_prob)\n #print confusion_matrix(val_target,prediction)\n with open(\"prediction.csv\", \"w\") as f: \n\tp_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n for id,label in zip(test_id,pred_prob):\n\t p_writer.writerow([id, label])\n\t\n #base_path = \"PZ/test/test/\"\n\n #with open(\"prediction.csv\", \"w\") as f:\n # p_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n # for _, _, imgs in os.walk(base_path):\n # for im in imgs:\n # pic_id = im.split(\".\")[0]\n #img = cv2.imread(base_path+im)\n #img = cv2.resize(img, (img_width, img_height), cv2.INTER_LINEAR)\n #img = img.transpose((2,0,1))\n #img = np.expand_dims(img,axis=0)\n #img = load_img(base_path + im)\n #img = imresize(img, size=(img_height, img_width))\n #test_x = img_to_array(img).reshape(3, img_height, img_width)\n #test_x = test_x.reshape((1,) + test_x.shape)\n #test_datagen.fit(img)\n #test_generator = test_datagen.flow(img,\n # batch_size=1,\n # shuffle=False)\n #prediction = model.predict_generator(test_generator, 1)\n #p_writer.writerow([pic_id, prediction])", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [[self.classes[np.argmax(subrow)] for subrow in row] for row in self.data]\n return tags", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def get_classification(self, image):\n #TODO implement light color prediction\n choices = {0: \"GREEN\", 1: \"YELLOW\", 2: \"RED\", 3: \"UNKNOWN\"}\n\n if self.capture_images:\n 
cv2.imwrite(self.imgPath+str(int(time.clock()*1000))+'.jpg', image)\n print('[TLClassifier] Saved Image ... ')\n\n if self.debug:\n print('[TL Classifier] invoked... ')\n\n if image.shape != (300, 200, 3):\n print('[TL Classifier] image shape NOK: ' + str(image.shape))\n return \"UNKNOWN shape\"\n \n assert image.shape == (300, 200, 3)\n if self.debug:\n print('[TL Classifier] assertion ok: ')\n\n res = None\n res = cv2.resize(image, (32,32), interpolation = cv2.INTER_CUBIC)\n image = res.reshape(1, 32, 32, 3)\n classification = self.model.predict_classes(image, verbose=0)[0]\n result = choices.get(classification, 'UNKNOWN')\n\n if self.verbose:\n print('[TL Classifier] ' + result + ' detected.')\n\n return result", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def predict(\n self, image_path, width=64, height=64, flatten=False\n ) -> Dict[str, float]:\n image = cv2.imread(image_path)\n image = cv2.resize(image, (width, height))\n image = image.astype(\"float\") / 255.0\n if flatten:\n image = image.flatten()\n image = image.reshape((1, image.shape[0]))\n else:\n image = image.reshape(\n (1, image.shape[0], image.shape[1], image.shape[2])\n )\n\n predictions = self.predict_model.predict(image)\n\n return dict(\n zip(\n self.labels.classes_,\n (round(float(pred), 3) for pred in predictions[0])\n )\n )", "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***", "def classify(im, model):\n\n classe = model.predict(im)\n classe = classe.argmax(axis=-1) # taking index of the maximum %\n return classe[0]", "def get_classification(self, image):\n\n imrs = cv2.resize(image, (64, 64)) \n imrs = imrs.astype(float)\n imrs = imrs / 255.0\n \n imrs = imrs[newaxis, :, :, :]\n\n with self.graph.as_default():\n preds = self.model.predict(imrs)\n \n predicted_class = np.argmax(preds, axis=1)\n\n choices = {0: TrafficLight.RED,\n 1: TrafficLight.YELLOW,\n 2: TrafficLight.GREEN,\n 3: TrafficLight.UNKNOWN}\n return choices.get(predicted_class[0], TrafficLight.GREEN)", "def predict(classifier, data):\n print(\"Beggining to classify data\")\n results = classifier.predict(data)\n results = pd.DataFrame(results)\n results.index += 1\n results.to_csv(\"out/results.csv\", header=[\"Label\"], index=True, index_label=[\"ImageId\"])\n print(\"Finished classifying data\")", "def get_classification(self, image):\n #TODO implement light color prediction\n max_idx = 4\n with self.detection_graph.as_default():\n with tf.Session(graph=self.detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n \n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n \n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = 
self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n min_score_thresh = .50\n # find majority light state\n counter = [0, 0, 0, 0, 0]\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > min_score_thresh:\n counter[classes[i]] += 1\n for i in range(1, 5):\n if counter[i] > counter[max_idx]:\n max_idx = i\n return self.classmap[max_idx]", "def make_prediction(image, model, class_names):\n image_tensor = load_and_prep_image(image)\n # Turn tensors into int16 (saves a lot of space, ML Engine has a limit of 1.5MB per request)\n preprocessed_img = tf.cast(tf.expand_dims(image_tensor, axis=0), tf.int16)\n preds = predict_json(project=PROJECT,\n region=REGION,\n model=model,\n instances=preprocessed_img)\n pred_class = class_names[tf.argmax(preds[0])]\n pred_conf = tf.reduce_max(preds[0])\n return image_tensor, pred_class, pred_conf", "def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]", "def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]", "def predict(model, img, imgSize):\n \n #Reajusta o tamanho da imagem para o tamanho esperado caso necessario.\n if img.size != imgSize :\n img = img.resize(imgSize)\n\n #Converte a imagem num array tridimensional.\n x = image.img_to_array(img)\n x = numpy.expand_dims(x, axis=0)\n #Normaliza a imagem.\n x = preprocess_input(x)\n \n #Faz a previsao atraves da rede.\n pred = model.predict(x)\n return imagenet_utils.decode_predictions(pred, top=5)[0]", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def predict(self):\n self.get_test_data()\n predicted_labels = []\n for row in self.test_data:\n predicted_labels.append(DecisionTree.predict_row(self.classifier, row))\n return predicted_labels", "def predict(self, data, version='default'):\n return self.skil.api.transformimage(\n deployment_name=self.deployment.name,\n image_transform_name=self.model_name,\n version_name=version,\n files=data\n )", "def decode_prediction(self, prediction):\n index = np.argmax(prediction)\n\n inv_map = {v: k for k, v in self.class_index.items()}\n label = inv_map[index]\n return label, np.amax(prediction)", "def predict(self, image):\n if len(image.shape) == 3:\n return self._predict_single(image)\n elif len(image.shape) == 4:\n return self._predict_batch(image)\n else:\n raise ValueError('Wrong image format.')", "def predict(self, image_path, topk=5, device='cpu'):\n self.model.to(device)\n self.model.eval()\n\n image = 
Image.open(image_path)\n np_image = self.process_image(image)\n image.close()\n image = np_image\n\n with torch.no_grad():\n image = torch.from_numpy(image).float()\n image = image.to(device)\n # reshape image to match shapes of images used from dataloaders\n image = image.view(1, *image.shape)\n output = self.model.forward(image)\n # put output back on cpu before moving to numpy\n output = output.cpu()\n\n values, indices = torch.topk(output.data, topk)\n ps = np.atleast_1d(torch.exp(values).numpy().squeeze()).tolist()\n\n idx_to_class = {\n value: key for key, value in self.model.class_to_idx.items()\n }\n classes = [idx_to_class[i]\n for i in np.atleast_1d(indices.numpy().squeeze())]\n\n return ps, classes", "def predict_from_image(image):\n cvimage = cv2.resize(image, config_utils.SHAPE)\n config_utils.logger.info(\"img shape after resize: '{}'.\".format(cvimage.shape))\n\n img = np.asarray(cvimage, dtype='float32')\n img /= 255.0 # scale 0 to 1\n mean = np.array([0.485, 0.456, 0.406]) \n std = np.array([0.229, 0.224, 0.225])\n img = (img - mean) / std\n img = np.transpose(img, (2,0,1)) \n img = np.expand_dims(img, axis=0) # e.g., [1x3x224x224]\n\n config_utils.logger.info(\"img shape final: '{}'.\".format(img.shape))\n\n predict(img)", "def get_classification(self, image):\n #TODO implement light color prediction\n \n with self.graph.as_default():\n img_expand = np.expand_dims(image, axis=0)\n start = datetime.now() #start = datetime.datetime.now() if import datetime\n (boxes, scores, classes, num_detections) = self.sess.run(\n [self.boxes, self.scores, self.classes, self.num_detections],\n feed_dict={self.image_tensor: img_expand}) \n end = datetime.now() #end = datetime.datetime.now()\n c = end - start\n #rospy.logwarn(\"tl_classifier - Image predicted in: {0} seconds\".format(c.total_seconds()))\n #print(c.total_seconds())\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n print('tl_classifier - CLASSES: 1=Green, 2=Red, 3=Yellow, 4=Unknown: ', classes[0])\n #print('tl_classifier - SCORES: ', scores[0])\n #print('tl_classifier - TrafficLight.GREEN: ', TrafficLight.GREEN) = 2 CLASSES: 1\n #print('tl_classifier - TrafficLight.RED: ', TrafficLight.RED) = 0 CLASSES: 2\n #print('tl_classifier - TrafficLight.YELLOW: ', TrafficLight.YELLOW) = 1 CLASSES: 3\n #print('tl_classifier - TrafficLight.UNKNOWN: ', TrafficLight.UNKNOWN) = 4 CLASSES: 4\n\n if scores[0] > self.threshold:\n if classes[0] == 1:\n print('GREEN')\n return TrafficLight.GREEN\n elif classes[0] == 2:\n print('RED')\n return TrafficLight.RED\n elif classes[0] == 3:\n print('YELLOW')\n return TrafficLight.YELLOW\n else:\n rospy.logwarn(\"Light: UNKNOWN\")\n\n \n return TrafficLight.UNKNOWN", "def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels", "def predict_image(self, image_paths):\n predictions = list()\n for image_path in image_paths:\n img = ImageHelper.get_image_by_path(image_path, self.target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = self.preprocess_input(x)\n\n with self.graph.as_default():\n features = self.model_base.predict(x)\n preds = self.model_top.predict(features)\n label, probability = self.decode_prediction(preds)\n\n predictions.append({\"image_path\": image_path,\n \"label\": label,\n \"probability\": probability})\n return predictions", "def get_classification(self, image):\n # return TrafficLight.RED\n # TODO 
implement light color prediction\n # creating an image object \n img_np = np.array(image) \n\n # convert np array to tensor\n input_tensor = tf.convert_to_tensor(img_np)\n\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis, ...]\n\n\n detections = self.loaded(input_tensor)\n\n num_detections = int(detections.pop('num_detections'))\n\n # detection_classes should be ints.\n detections_dict = {key: value[0, :num_detections].numpy() for key, value in detections.items()}\n\n\n # detection_classes should be ints.\n detections_dict['detection_classes'] = detections_dict['detection_classes'].astype(np.int64)\n\n label_id_offset = 1\n\n # DEBUG - can do it in a cleaner way :0\n tl_classes = {3: 'green', 2: 'red'}\n top_classes_prediction = list(detections_dict['detection_classes']+label_id_offset)[:5] \n #print(top_classes_prediction)\n for i in range(len(top_classes_prediction)):\n if top_classes_prediction[i] == 2:\n top_classes_prediction[i] = 'green'\n elif top_classes_prediction[i] == 3:\n top_classes_prediction[i] = 'red'\n\n\n #print(\"--------->\", image_path, \"<-----------\")\n #print( top_classes_prediction ) \n #print(detections_dict['detection_scores'][:5], '\\n' )\n\n # basic red tl logic\n if top_classes_prediction[0] == 'red' and detections_dict['detection_scores'][0] >= 0.60:\n #print(\"-------------> RED TRAFFIC LIGHT <----------------\\n\")\n self.current_light = TrafficLight.RED\n #rospy.logwarn( \"----------------- Taffic light is RED !!! -------------------- \" )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n else:\n #print(\"No red traffic is detected\\n\")\n self.current_light = TrafficLight.GREEN\n #rospy.logwarn( \"----------------- You're good to go !!! 
--------: {0} - {1} \".format(top_classes_prediction[0], detections_dict['detection_scores'][0]) )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n\n return self.current_light", "def predict(uploaded_file):\n loc = AudioPredict.return_image(uploaded_file)\n return loc", "def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)", "def get_classification(self, image):\n\n\tif 'session' in locals() and session is not None:\n \t print('Close interactive session')\n session.close()\n\n time_start = time.time()\n #TODO implement light color prediction\n #image_np = self.__preprocess_image(image)\n \timage_np = image \n \n \t# Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n time0 = time.time()\n\n # Actual detection.\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n time1 = time.time()\n\n output = self.__postprocessing_detected_box(scores[0], classes[0])\n rospy.loginfo('Time in seconds' + str(time1-time_start)+' Result:'+self.__traffic_id_to_name(output))\n return output", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def predict_image(self, array_data, w_x, w_y):\n\t\tproba = numpy.array(self.predict_proba_image(array_data, w_x, w_y))\n\t\treturn self.classes_.take(numpy.argmax(proba, axis=0))" ]
[ "0.8147907", "0.8066166", "0.7634904", "0.7613867", "0.7565054", "0.75600857", "0.7542438", "0.74744403", "0.7456327", "0.74438494", "0.7441574", "0.73974526", "0.73938596", "0.73485583", "0.73224735", "0.7307081", "0.7293566", "0.72353786", "0.72337973", "0.7217787", "0.7184297", "0.71636516", "0.7152879", "0.71269506", "0.71236366", "0.71122915", "0.709912", "0.7098921", "0.7067994", "0.704274", "0.7033562", "0.7009377", "0.70087147", "0.6995813", "0.69925743", "0.6988911", "0.69807523", "0.6980419", "0.6971459", "0.6970096", "0.6934128", "0.69324726", "0.69232994", "0.6919074", "0.69187534", "0.6911197", "0.6854696", "0.6852329", "0.6840272", "0.683236", "0.68119186", "0.6810578", "0.67955357", "0.6781024", "0.6738237", "0.6735718", "0.6726645", "0.672588", "0.672097", "0.67208594", "0.67133504", "0.67106104", "0.6703914", "0.6702606", "0.6697454", "0.66955274", "0.6681647", "0.66787016", "0.6678632", "0.6670061", "0.6669453", "0.665706", "0.6656783", "0.6641922", "0.6641365", "0.66381174", "0.66362643", "0.66274685", "0.662744", "0.66271126", "0.6625729", "0.662191", "0.66202575", "0.66202575", "0.6619568", "0.6614799", "0.66067696", "0.6601707", "0.6599775", "0.6595531", "0.659403", "0.65845066", "0.65737456", "0.6573495", "0.6569773", "0.6569008", "0.6567075", "0.6561305", "0.6560732", "0.65452415", "0.65446514" ]
0.0
-1
Publish response to Kafka topic
def publish_response(class_label):\n    client = KProducer(config=publisher_config)\n    client.produce(class_label, PUBLISHER_TOPIC)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def produce(self, response, regex, ts):\n self.logger.info(\"Producing message...\")\n\n payload = {\n \"url\": response.url,\n \"latency\": response.elapsed,\n \"status\": response.status_code,\n \"check_time\": ts,\n }\n\n if regex:\n try:\n payload[\"regex_match\"] = bool(re.search(regex, response.text))\n except re.error as e:\n raise e\n\n try:\n self.producer.produce(\n self.topic,\n value=json.dumps(payload, cls=JSONDatetimeEncoder),\n callback=_log_produced,\n )\n self.producer.poll(1)\n except KafkaException as e:\n self.logger.error(\n \"An error occurred while producing a message: %s\", e.args[0].reason\n )", "def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)", "def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.", "def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)", "def publish_mqtt(self, topic, data={}, on_publish=None, on_response=None, inject_rid=True):\n payload = data\n\n # If this is a dict and we're allowed to inject a request ID, do so\n # Injecting a request ID allows the nodes to respond and us to execute callbacks\n if (type(data) is dict) and inject_rid:\n data['rid'] = str(shortuuid.uuid())\n\n # JSON encode dicts, lists and stuff\n if type(data) in [dict, list, tuple]:\n payload = json.dumps(data)\n\n result, mid = self.mqtt.publish(topic, payload, qos=1)\n\n if on_publish:\n self.publish_callbacks[mid] = on_publish\n\n if on_response and data and data.get('rid', None):\n self.response_callbacks[data['rid']] = on_response\n\n self.publishes.append(mid)\n\n while mid in self.publishes:\n self.wait()", "def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));", "def reply(self, topic, callback):\n \n msg = self.topics[topic].recv()\n rep = callback(msg)\n self.topics[topic].send(rep)", "def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)", "def publish(self, topic, value):\n msg = self.topics[topic]['msg']\n msg.data = value\n self.topics[topic]['publisher'].publish(msg)\n print(\"published \\t{} \\t{}\".format(topic, value))", "def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )", "def output_topic_callback(self, msg):\n with self.callback_lock:\n if self._time_received_input != 0:\n # Get actual time from ROS\n time_now = self.node.get_clock().now().nanoseconds\n\n # Compute the amount of time elapsed from receiving the last\n # message in the input topic\n measure = time_now - self._time_received_input\n\n # Transform from nanoseconds to milliseconds\n measure = measure / (1000 * 1000)\n\n publish_msg = Int64()\n publish_msg.data = int(measure)\n\n # Publish the measurement\n self._publisher.publish(publish_msg)\n\n self._time_received_input = 0", "def publish(self, topic, payload):\n complete_topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(complete_topic, payload, qos=2)\n logger.info(\"On topic %s published: %s\", complete_topic, payload)", "def publish_message(producer_instance, topic_name, key, value):\n 
key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')", "def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)", "def publish_and_wait(self, node, topic, data={}):\n pass", "def post(self):\n s = ScuttlebuttService()\n try:\n topic_dict = simplejson.loads(self.request.body)\n topic = s.CreateTopic(topic_dict)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(topic.ToDict()))\n except simplejson.JSONDecodeError:\n # HTTP 400 for bad syntax.\n self.response.set_status(\n 400, 'Failed to create topic. Invalid JSON: %s' % self.request.body)\n except Exception, e:\n # HTTP 422 for syntactically correct but semantically wrong.\n self.response.set_status(422, 'Error creating topic: %s' % e)", "def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)", "def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)", "def maybe_notify_lessee(request, response):\n if request.get('pubsub_topic'):\n pubsub.publish(\n pubsub.full_topic_name(\n request['pubsub_project'], request['pubsub_topic']),\n json.dumps(response),\n {},\n )\n metrics.pubsub_messages_sent.increment(fields={'target': 'lessee'})", "def publish(self, message: None):\n response = self.client.publish(TopicArn=self.params['topic_arn'], Message=message)\n return response", "def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n 'msg': message\n }))", "async def publish(self, msgDict):\n try:\n msgId = msgDict.get(\"id\", None)\n msgType = msgDict.get(\"type\", None)\n msgRetry = msgDict.get(\"retry\", None)\n if msgId:\n self.write('id: {}\\n'.format(msgId))\n if msgType:\n self.write('event: {}\\n'.format(msgType))\n if msgRetry:\n self.write('retry: {}\\n'.format(msgRetry))\n self.write('data: {}\\n\\n'.format(msgDict[\"data\"]))\n await self.flush()\n return True\n except StreamClosedError:\n return False", "def response_kafka_topic_name(self, response_kafka_topic_name: str):\n\n self._response_kafka_topic_name = response_kafka_topic_name", "def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)", "def publish(self, message: str) -> None:", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n logger.debug('Publishing \"%s\" data to node \"%s\"', topic, node.node_id)\n\n logger.debug('Connecting to \"%s:%s\" over TCP socket', node.node_id, self.options['port'])\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((node.node_id, self.options['port']))\n\n # JSON encode dicts, lists and stuff\n if type(data) in [dict, list, tuple]:\n data = json.dumps(data)\n\n payload = {\n 'cmd': topic,\n 'data': data,\n }\n payload = json.dumps(payload)\n 
payload = bytes(payload, 'utf8')\n\n logger.debug('Sending %s bytes of data', len(payload))\n conn.send(payload)\n\n if on_publish:\n logger.debug('Calling publish callback')\n on_publish()\n\n conn.setblocking(0)\n ready = select.select([conn], [], [], self.timeout / 1000)\n payload, data = None, None\n\n if ready[0]:\n payload = conn.recv(8192)\n payload = str(payload, 'utf8')\n logger.debug('Received %s bytes of data' % len(payload))\n\n try:\n data = json.loads(payload)\n except json.decoder.JSONDecodeError as e:\n logger.error('Error while JSON decoding message payload: %s' % e)\n\n if on_response:\n logger.debug('Calling response callback')\n on_response(payload, data)\n\n logger.debug('Closing connection')\n conn.close()\n\n return payload, data", "def publish_and_wait(self, node, topic, data={}):\n return self.publish(node, topic, data=data)", "def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )", "def publish_to_simulation(self, topic, message, **kwargs):\n pass", "def acked(err, msg):\n if err is not None:\n print(\"Failed to deliver message: {}\".format(err))\n else:\n print(\"Produced record to topic {} partition [{}] @ offset {}\"\n .format(msg.topic(), msg.partition(), msg.offset()))", "def publish_status(client):\n client.publish(config.topic_get, payload=getlight())", "def publish(host, mqtt_port, rest_port, topic, payload):\n\n click.echo(\"Publishing the following message: \" + payload + \".\")\n service = Service(host, mqtt_port, rest_port)\n if service.mqtt_client.is_connected:\n if service.publish(topic, payload):\n click.secho(\"Message successfully published on topic: \" + topic +\n \".\", fg=\"green\", bold=True)\n else:\n click.secho(\"There was an error publishing this message!\",\n fg=\"red\", bold=True)\n else:\n click.secho(\"The client was unable to connect to the mqtt broker!\",\n fg=\"red\", bold=True)", "def publish(self, topic, content):\n # check if ACKed\n if not self.connack_rec:\n return 1\n\n # compose frame\n frame = Message.PublishFrame().compose(topic, content)\n\n # send frame\n self.send_q.put(frame.encode())", "def publish(self, data):\n # [START pubsub_quickstart_publisher]\n # [START pubsub_publish]\n # Data must be a bytestring\n logger.info(\"publishing message %s\" % data)\n data = data.encode('utf-8')\n self.publisher.publish(self.topic_path, data=data)\n\n logger.info('Published messages: {}'.format(data))\n # [END pubsub_quickstart_publisher]\n # [END pubsub_publish]", "def publish_event(self, topic):\n topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(topic, qos=2)\n logger.info(\"Event published on topic %s\", topic)", "def publish_mqtt_msg(topic, mqtt_msg):\n\n MQTT_HOST = settings.MQTT_HOST\n MQTT_PORT = settings.MQTT_PORT\n MQTT_KEEPALIVE_INTERVAL = settings.MQTT_KEEPALIVE_INTERVAL\n\n MQTT_TOPIC = topic\n\n MQTT_MSG = json.dumps(mqtt_msg)\n\n \"\"\" Celery task to create a password for the user \"\"\"\n\n celery_task.delay(MQTT_MSG)\n\n def on_publish(client, userdata, mid):\n print(\"Message Published...\")\n\n def on_connect(client, userdata, flags, rc):\n client.subscribe(MQTT_TOPIC)\n client.publish(MQTT_TOPIC, MQTT_MSG)\n\n def on_message(client, userdata, msg):\n print(msg.topic)\n print(msg.payload)\n payload = json.loads(msg.payload)\n print(payload['sepalWidth'])\n client.disconnect()\n\n mqttc = mqtt.Client()\n mqttc.on_publish = on_publish\n mqttc.on_connect = on_connect\n mqttc.on_message = on_message\n\n mqttc.connect(MQTT_HOST, MQTT_PORT, 
MQTT_KEEPALIVE_INTERVAL)", "def publishstats(self, topic=None, stats=None):\n ltopic = self.msgtopic\n if topic:\n ltopic = topic\n if not self.echotopic:\n self.echotopic = True\n logging.info(\"Selecting MQTT topic {}\".format(ltopic))\n\n self.mqttc.publish(ltopic, json.dumps(stats))", "def publish(self, topic, partition, data):\n c = self._connctions.get('__runpub__')\n\n if c and c.ws is not None:\n sm = SocketMessage('pub', topic=topic, partitions=partition, entries=data)\n c.ws.send(sm.toJSON())\n return True\n else:\n return False", "def trigger_result_email(\n self, project_id: str, topic_name: str,\n operation_counts_dict: Mapping[str, operation_counts.OperationCounts]\n ) -> None:\n topic = f'projects/{project_id}/topics/{topic_name}'\n message = {\n 'attributes': {\n 'content_api_results':\n json.dumps(\n operation_counts_dict,\n default=_convert_operation_counts_into_json)\n }\n }\n try:\n self._client.publish(topic, json.dumps(message).encode('utf-8'))\n except exceptions.GoogleCloudError as cloud_error:\n logging.exception('PubSub to mailer publish failed: %s', cloud_error)", "def publish_and_wait_mqtt(self, topic, data={}):\n result = [None, None]\n finish = Event()\n\n def on_response(payload, data):\n result[0] = payload\n result[1] = data\n\n def do_timeout():\n finish.set()\n\n self.publish_mqtt(topic, data, on_response=on_response)\n timer = Timer(self.timeout / 1000, do_timeout)\n timer.start()\n\n while (not result[0]) and (not finish.is_set()):\n self.wait()\n\n timer.cancel()\n\n if finish.is_set():\n raise TimeoutError('Reached timeout of %sms while waiting for response!' % self.timeout)\n\n return result", "def _publish(self, messages):\n num_of_msg = len(messages)\n\n LOG.debug('Publishing %d messages', num_of_msg)\n\n first = True\n while True:\n try:\n for topic in self._topics:\n self._kafka_publisher.publish(\n topic,\n messages\n )\n LOG.debug('Sent %d messages to topic %s',\n num_of_msg, topic)\n break\n except FailedPayloadsError as ex:\n # FailedPayloadsError exception can be cause by connection\n # problem, to make sure that is not connection issue\n # message is sent again.\n LOG.error('Failed to send messages %s', ex)\n if first:\n LOG.error('Retrying')\n first = False\n continue\n else:\n raise falcon.HTTPServiceUnavailable('Service unavailable',\n str(ex), 60)\n except Exception as ex:\n LOG.error('Failed to send messages %s', ex)\n raise falcon.HTTPServiceUnavailable('Service unavailable',\n str(ex), 60)", "def on_publish(self, mqtt_client, userdata, mid):\n logging.debug(\"DEBUG - publish ack received\")", "def send(self, result):\n\n logger.debug(\"Checking if we should send {}\".format(result))\n if result.is_failure() and self.will_publish(result):\n message = self._construct_message(result)\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n data = json.dumps({\n \"service_key\": self._api_key,\n \"event_type\": \"trigger\",\n \"description\": message,\n \"incident_key\": self._generate_id(result)\n })\n\n #exponential backoff\n logger.debug(\"Sending send {}\".format(result))\n for i in range(4):\n resp = requests.post(self._api_end_point,\n data=data, headers=headers, stream=True)\n\n logger.debug(\"Response from PagerDuty: {}\".format(resp.status_code))\n if 200 <= resp.status_code < 300:\n break\n elif resp.status_code == 403 or resp.status_code >= 500:\n #PagerDuty docs indicate these codes should result\n #in a wait and then a retry.\n time.sleep(2**i)\n else:\n raise PublishFailure(self, \"{0} - {1} ({2})\".format(result, 
resp.text, resp.status_code))\n else:\n raise PublishFailure(self, \"{0} - {1} ({2})\".format(result, resp.text, resp.status_code))", "def callback(ch, method, properties, body):\n requestParams = json.loads(body.decode('utf-8'))\n # print(\"inside the callback\")\n arg1 = int(requestParams[0])\n arg2 = int(requestParams[1])\n result = whaleClassifier.test(arg1, arg2)\n # what this does it publish the RESULT to the exchange (as producers of content \n # cannot send stuff directly to queues, they send to exchanges and then exchanges \n # send to queues. Note Exchange='' is default exchange which then sends to the\n # queue that is listed on the ROUTING_KEY argument.)\n ch.basic_publish(exchange='', \n routing_key=results_queue, \n body=json.dumps(result),\n properties=pika.BasicProperties(\n delivery_mode = 2, # make message persistent\n ))\n # ch.basic_ack(delivery_tag=method.delivery_tag) #need this line so that we don't resend this same message again the next time\n # we start up this script. Which eventually clogs up memory", "def request(self, topic, req, callback):\n self.topics[topic].send(req)\n msg = self.topics[topic].recv()\n callback(msg)", "def publish(self):\n data = self.read_all_values()\n logger.info(data)\n if self.mqtt:\n self.mqtt.publish_json(data)", "def send_msg_to_kafka(self, msg: dict) -> None:\n producer_kafka_connection = self.connect_to_kafka()\n kafka_topic = self.topic_name\n url_as_key = bytes(self.source_url, 'utf-8')\n # Send message to Kafka topic\n try:\n logging.info(f'Sending to Kafka message -> {msg}')\n kafka_host = str(self.kafka_bootstrap_server).split(':')[0]\n kafka_port = str(self.kafka_bootstrap_server).split(':')[1]\n if not conn.dns_lookup(kafka_host, int(kafka_port)):\n logging.error(f'Unable to connect to {self.kafka_bootstrap_server}.'\n f' Please check if Kafka server is alive')\n sys.exit(1)\n meta = producer_kafka_connection.send(topic=kafka_topic, key=url_as_key, value=msg)\n # Make all messages in buffer ready to the sending\n producer_kafka_connection.flush()\n except Errors.BrokerNotAvailableError as e:\n producer_kafka_connection.close()\n logging.exception(f'{e}. 
Please check if config contains correct Kafka connection params or topic name')\n sys.exit(1)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending message to Kafka for visualizing\")", "def _send(self, topic, message):\n\n body = {'message': encode(message)}\n result = requests.post('{0}/topics/{1}'.format(self.apiUrl, topic), json=body)\n return result.json()", "def message_sender(m):\n my_producer = KafkaProducer(\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n my_producer.send(cfg.end_topic,m)\n return m", "def message_sender(m):\n my_producer = KafkaProducer(\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n my_producer.send(cfg.end_topic,m)\n return m", "async def publish(self, topic: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n acknowledge: bool = None,\n blackwhitelist: aiowamp.BlackWhiteList = None,\n exclude_me: bool = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> None:\n ...", "def publish_messages(topic_arn, messages):\n sns_client = boto3.client('sns')\n for m in messages:\n message_as_json = json.dumps(m)\n response = sns_client.publish(\n TopicArn=topic_arn,\n MessageStructure='json',\n Message=json.dumps({\n 'default': message_as_json\n }),\n Subject=f'Source: {__file__}'\n )\n response_status = response['ResponseMetadata']['HTTPStatusCode']\n print(f'{message_as_json} -> {topic_arn} [{response_status}]')\n assert response_status == 200, response", "def publish(self, message: model.MQTTMessage):\n self.client.publish(message.topic, payload=message.get_payload())", "def step(self) -> None:\n info = self._check_status()\n for key in info:\n self.client.publish(\n topic=f\"home/watering/{key}\", payload=info[key], retain=True\n )", "def producer(self, topic, msg, e=None):\n producer = KafkaProducer(bootstrap_servers=['HOST_IP', 'HOST_IP', 'HOST_IP']\n ,api_version=(2, 2, 1),security_protocol='SSL',\n ssl_check_hostname=True,\n ssl_cafile='/home/oulu/certs/ca-cert',\n ssl_certfile='/home/oulu/certs/cutler-p3-c1-00.crt',\n ssl_keyfile='/home/oulu/certs/cutler-p3-c1-00.key')\n\n msg_b = str.encode(msg)\n producer.send(topic, msg_b).get(timeout=30)\n\n if (e):\n logging.exception('exception happened')", "def publish(self, topic, message, subject=None):\r\n params = {'ContentType' : 'JSON',\r\n 'TopicArn' : topic,\r\n 'Message' : message}\r\n if subject:\r\n params['Subject'] = subject\r\n response = self.make_request('Publish', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def publish(self, message, topic=''):\n if type(message) != types.ListType:\n message = [message]\n if topic:\n message = [topic] + message\n self.send(message)", "def publish(self, kpi_dict):\n pass", "def reply(self, private_key, msg_id, response):\n return self._samp_hub.reply(private_key, msg_id, response)", "def publish():\n pass", "def publish_message(self, topic, message):\n\n def delivery_report(err, msg):\n \"\"\" Called once for each message produced to indicate delivery result.\n Triggered by poll() or flush(). 
\"\"\"\n if err is not None:\n print('Message delivery failed: {}'.format(err))\n else:\n print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))\n\n # Trigger any available delivery report callbacks from previous produce() calls\n self.producer.poll(0)\n\n # Asynchronously produce a message, the delivery report callback\n # will be triggered from poll() above, or flush() below, when the message has\n # been successfully delivered or failed permanently.\n value_to_publish = message\n\n if self.handle_json_message_data:\n if type(message) not in (dict, list):\n raise MessageValueException(\"Your message should be json serializable!\")\n value_to_publish = json.dumps(value_to_publish)\n\n self.producer.produce(topic, value_to_publish.encode('utf8'), callback=delivery_report)\n\n # Wait for any outstanding messages to be delivered and delivery report\n # callbacks to be triggered.\n self.producer.flush()", "def pub(self, topic, msg, callback=None):\n return self._pub('pub', topic, msg, callback=callback)", "def on_publish(mqttc, obj, mid):\n logger.debug(\"MQTT PUBLISH: mid: \" + str(mid))", "def sendMessage(topic, data, key, producer):\n producer.poll(0)\n producer.produce(topic, data.encode('utf-8'), key, callback=delivery_report)\n producer.flush()", "def callback(ch, method, properties, body):\n print(f\" [x] Received {str(body)} kW.\")\n\n try:\n timestamp = properties.timestamp\n current_time = datetime.utcfromtimestamp(timestamp).replace(\n tzinfo=timezone.utc\n )\n except AttributeError:\n # If we don't get a timestamp from the broker, add a timestamp here.\n current_time = datetime.now().replace(tzinfo=timezone.utc)\n\n pv_photovoltaic = generate_pv_output(current_time)\n\n report_item = PVMeterReportItem(\n timestamp=current_time.isoformat(),\n pv_meter=int(body),\n pv_photovoltaic=pv_photovoltaic,\n )\n generate_report(report_item)\n\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def process_sink_msg(self):\n logging.debug('Received message on the sink socket')\n \n msg = self.sink_socket.recv_json()\n \n logging.debug('Message: %s', msg)\n\n # Publish the results to the clients using the\n # request id of the service request as the topic\n self.result_pub_socket.send_unicode(msg['uuid'], zmq.SNDMORE)\n self.result_pub_socket.send_json(msg)", "def acked(err, msg):\n if err is not None:\n print(\"failed to deliver message: {}\".format(err.str()))\n else:\n print(\"produced to: {} [{}] @ {}\".format(msg.topic(), msg.partition(), msg.offset()))", "def trace_callback(msg):\n # Construct topic\n msg_topic = 'modbus/msg/trace/{}/{}/{}'.format(node_id, msg.address, msg.function)\n # Send message as JSON\n logging.debug('Publishing message on {}, address={}, function={}'.format(msg_topic, msg.address, msg.function))\n client.publish(topic = msg_topic, payload = msg.to_JSON())", "def send_messages(self, partition, *msg):\n if self.async:\n for m in msg:\n self.queue.put((partition, create_message(m)))\n resp = []\n else:\n messages = [create_message(m) for m in msg]\n req = ProduceRequest(self.topic, partition, messages)\n try:\n resp = self.client.send_produce_request([req], acks=self.req_acks,\n timeout=self.ack_timeout)\n except Exception as e:\n log.exception(\"Unable to send messages\")\n raise e\n return resp", "def delivery_callback(err, msg):\n if err is not None:\n LOG.error(f\"Failed to deliver message: {msg}: {err}\")\n else:\n LOG.info(\"kafka message delivered.\")", "def publish_messages(line): \n command = \"gcloud beta pubsub topics publish \"+ 
topic_name+\" --message \"+'\"'+str(line)+'\"'\n os.system(command)", "def publish(self, node, topic, **kwargs):\n topic = self.generate_node_topic(node, topic)\n\n return self.publish_mqtt(topic, **kwargs)", "def on_publish(client, userdata, mid):\n print(\"Message Published.\")", "def emit(self, record):\n try:\n topic, record.msg = record.msg.split(TOPIC_DELIM,1)\n except Exception:\n topic = \"\"\n try:\n bmsg = cast_bytes(self.format(record))\n except Exception:\n self.handleError(record)\n return\n \n if isinstance(topic, str):\n btopic = cast_bytes(topic)\n else:\n print(\"Exception: topic is not string:{topic}\".format(topic=topic))\n btopic = b'Debug' \n\n self.socket.send_multipart([btopic, bmsg])", "def publish(client, pubsub_topic, data_lines):\n messages = []\n for line in data_lines:\n pub = base64.urlsafe_b64encode(line)\n tweet_dict = json.loads(line)\n # input_format = '%a %b %d %H:%M:%S %z %Y'\n dest_format = '%Y-%m-%dT%H:%M:%SZ'\n try:\n pubsub_timestamp = parser.parse(tweet_dict['created_at']).strftime(dest_format)\n except:\n print(\"Error encountered in parsing a 'created_at' timestamp from:\\n{0}\".format(tweet_dict))\n continue\n # print(\"tweet_dict: {0}\".format(tweet_dict))\n response = client.publish(topic=pubsub_topic,\n data=pub, created_at=pubsub_timestamp)\n # messages.append({'data': pub})\n # body = {'messages': messages}\n # resp = client.publish(\n # topic=pubsub_topic, body=body, created_at= ).execute(num_retries=NUM_RETRIES)\n # return resp", "def listen_publish_loop(responses, publisher):\n\n for response in responses:\n if rospy.is_shutdown():\n rospy.loginfo('Shutting Down...')\n break\n\n if not response.results:\n # Then there was no result in this response\n continue\n\n # The `results` list is consecutive. 
For streaming, we only care about\n # the first result being considered, since once it `is_final`, it\n # moves on to considering the next utterance.\n result = response.results[0]\n if not result.alternatives:\n continue\n\n if result.is_final:\n # Display the transcription of the top alternative.\n transcript = result.alternatives[0].transcript\n confidence = result.alternatives[0].confidence\n rospy.loginfo(\"Final: '%s' w/ confidence %f\" % (transcript, confidence))\n # Here is where it needs to be published\n transmsg = TranscriptionResult(transcribed_text=transcript,\n confidence=confidence)\n\n publisher.publish(transmsg)\n else:\n transcript = result.alternatives[0].transcript\n stability = result.stability\n rospy.loginfo(\"Interim: '%s' w/ stability %f\" % (transcript, stability))\n\n return", "def publishEvent(eventName,publisher, msg):", "def kafka_commit(self):\n self.kf_producer.flush()", "def mqtt_publish(image):\n logging.debug('publishing image to mqtt broker topic %s', \n config['mqtt']['publish_topic'])\n mqtt_client.publish(config['mqtt']['publish_topic'], image)", "def topic(self, topic):\n self.connection.topic(str(self), topic)", "def produce_messages(self, device_id, measurements):\n\n msg = {'device_id': device_id,\n 'measurements': measurements}\n\n self.channel.basic_publish(exchange='',\n routing_key=self.queue_name,\n body=json.dumps(msg),\n properties=pika.BasicProperties(content_type='application/json'))", "def publish(self, topic: Hashable, *args, **kwargs):\n for sub in self.subscribers[topic]:\n sub(*args, **kwargs)", "def publish_watering_message(uid):\n d = dict()\n d['watering'] = dict()\n d['watering']['timestamp'] = time.time()\n d['watering']['uid'] = uid\n\n message = json.dumps(d)\n logging.info('Publish watering request: %s', message)\n paho.mqtt.publish.single('planteur/watering', message)", "def on_publish(client: mqtt.Client, userdata: Any, mid: int) -> None:\n logging.info(f\"Successfully published a message: mid={mid}\")", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "def sendToSplunk(self,\n splunk_hec):\n\n # Initialize and start consumer if down\n if(not self.consumer_started):\n self.consumer = self.getConsumer(self.client.topics[self.topic])\n\n # Attempt to send messages to Splunk\n status_code = splunk_hec.writeToHec(self.messages)\n\n # clear messages\n self.messages = []\n\n # Check for successful delivery\n if(status_code == 200):\n # commit offsets in Kafka\n self.consumer.commit_offsets()\n return\n else:\n # Stop consumer and mark it down\n self.consumer.stop()\n self.consumer_started = False\n\n # Raise exception for retry\n logging.error(\"Failed to send data to Splunk HTTP Event Collector - check host, port, token & channel\")\n raise Exception('Failed to send data to Splunk HTTP Event Collector - Retrying')", "def produce(self, message):\n self.producer.send(self.topic, message)", "def send_to_kafka(rows):\n producer = connect_kafka_producer()\n for row in rows:\n print(row.asDict())\n producer.send(TOPIC_NAME, value=row.asDict())\n producer.flush()", 
"def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "async def publish(self, nats_subject: str, payload_df):\n await self.nc.publish(nats_subject, payload_df)", "def publish_and_wait(self, node, topic, **kwargs):\n topic = self.generate_node_topic(node, topic)\n\n return self.publish_and_wait_mqtt(topic, **kwargs)", "def pubcmd(self, topic, qos, value, deviceid):\n try:\n self.log.info(u\"==> Publish MQTT message: '%s'='%s' for device name '%s'\" % (topic, value, self.devicelist[deviceid][\"name\"]))\n (result, mid) = self.MQTTClient.publish(str(topic), str(value), int(qos))\n except ValueError: # Will be raised if topic is None, has zero length or is invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if the length of the payload is greater than 268435455 bytes.\n errorstr = u\"### Invalid '%s' topic for device name '%s'\" % (topic, self.devicelist[deviceid][\"name\"])\n self.log.error(errorstr)\n return False, errorstr\n \n if result == mqtt.MQTT_ERR_SUCCESS:\n return True, None\n else:\n errorstr = u\"### Publishing value '%s' with '%s' topic for device name '%s' has failed.\" % (value, topic, self.devicelist[deviceid][\"name\"])\n self.log.error(errorstr)\n return False, errorstr", "def _publish_request(self, body, correlation_id):\n try:\n conn = self._listener.wait_ready()\n if conn:\n with kombu.producers[conn].acquire(block=True) as producer:\n producer.publish(\n body=body,\n exchange=self.exchange,\n routing_key=self.topic,\n reply_to=self.queue_name,\n correlation_id=correlation_id,\n delivery_mode=2\n )\n return True\n except socket.error as e:\n if e.errno != errno.EPIPE:\n raise\n else:\n LOG.debug('Retrying publish due to broker connection failure')\n return False", "def publish(\n hass: HomeAssistant,\n topic: str,\n payload: PublishPayloadType,\n qos: int | None = 0,\n retain: bool | None = False,\n encoding: str | None = DEFAULT_ENCODING,\n) -> None:\n hass.add_job(async_publish, hass, topic, payload, qos, retain, encoding)", "def publish_mqtt_message(self, topic: str, payload: str) -> bool:\n self.log.error(\n \"This functionality is not enabled yet since it is unclear \"\n \"whether this CSC will be responsible for this or if this will be \"\n \"done via the HVAC software user interface.\"\n )\n return False\n # msg_info = self.client.publish(topic=topic, payload=payload)\n # return msg_info.is_published()", "def publish(self):\n return", "def call(self, json_msg):\n self.response = None\n self.corr_id = str(uuid.uuid4())\n print(\"Sending data: \" + str(json_msg), file=sys.stdout)\n # Write the json message onto the specified queue\n self.channel.basic_publish(\n exchange='',\n routing_key=self.routing_key,\n properties=pika.BasicProperties(\n reply_to=self.callback_queue,\n correlation_id=self.corr_id,\n delivery_mode=2,\n ),\n body=json.dumps(json_msg)\n )\n # Listen for the response recieved as part of the request-reply pattern\n while self.response is None:\n self.connection.process_data_events()\n\n return self.response" ]
[ "0.6920841", "0.6638568", "0.6578799", "0.6561684", "0.6525909", "0.65240884", "0.6514028", "0.641655", "0.62882924", "0.627717", "0.62517506", "0.61733186", "0.61410475", "0.6117747", "0.61167103", "0.61138386", "0.61064917", "0.608827", "0.6083523", "0.6068794", "0.60644025", "0.6057398", "0.60570663", "0.6046443", "0.5974617", "0.59727097", "0.5960435", "0.5958009", "0.59568113", "0.5949957", "0.5948443", "0.59387016", "0.59300995", "0.59277797", "0.59173715", "0.58980703", "0.58974993", "0.5895266", "0.58930343", "0.5882301", "0.5858871", "0.58564544", "0.5845909", "0.5842628", "0.5817927", "0.5807262", "0.5788592", "0.5786615", "0.57822084", "0.5769515", "0.5752356", "0.5752356", "0.57449996", "0.5735036", "0.57197994", "0.5704225", "0.5704194", "0.56930417", "0.5692574", "0.56915957", "0.56766987", "0.5649441", "0.56396544", "0.56291527", "0.562823", "0.5625506", "0.5624444", "0.56190276", "0.560589", "0.560398", "0.5597935", "0.55964637", "0.55854553", "0.55760604", "0.5573319", "0.5556997", "0.5546165", "0.5537694", "0.5536049", "0.551304", "0.55030906", "0.549069", "0.5486974", "0.5481931", "0.5457413", "0.54558635", "0.54518086", "0.54464626", "0.5443429", "0.5443382", "0.5435871", "0.543154", "0.5424679", "0.5422194", "0.54132456", "0.54072756", "0.5404721", "0.5368835", "0.5363301", "0.5358102" ]
0.7494711
0
Start Consuming data coming in as images from requesters
def enable_subscription(): client = KConsumer(config=subscriber_config) counter = 0 while 1: data = client.consume() if data: print("Received Data", counter) class_label = inference_on_data(data.value) publish_response(class_label)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __image_request_handler(self):\n self.__logger.info(\"Image Request Handling Thread started\")\n ticker = Ticker(2)\n while self._running:\n timeout = ticker.end_tick(False)\n try:\n task = self.__image_queue.get(True, timeout)\n except Queue.Empty:\n ticker.start_tick()\n continue\n\n # There is a task to process\n ticker.start_tick()\n source, connection = task\n\n # Check if the connection has been closed. If it was,\n # do not bother processing the request.\n if not connection.connected():\n self.__logger.info(\"Skipping request for image of source %s\" \\\n \" because requesting client disconnected\" \\\n % source)\n self.__image_queue.task_done()\n continue \n\n # Obtain new image\n error = \"No image available\"\n image = None\n mtime = time.time()\n if source in self.__video_modules:\n try:\n mtime, image = self.__get_image(source)\n except Exception as err:\n error = \"Obtaining image failed: %s\" % repr(err)\n else:\n error = \"Video source %s has not been started\" % source\n\n if connection.connected():\n if image:\n # Valid image was obtained\n img_str = image.tostring()\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'shape': (image.width, image.height),\n 'depth': image.depth,\n 'nChannels': image.nChannels}\n else:\n # An error occured, notify the vision module\n self.__logger.info(\"Failed to obtain image for source %s. \"\\\n \" Error message: %s\" % (source, error))\n img_str = \"\"\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'error': error}\n # Send the data to the vision module.\n if not connection.sendall(data, img_str):\n self.__logger.warning(\"Failed to send data to client. \" \\\n \"Probably disconnected\")\n else:\n self.__logger.info(\"Image of source %s obtained but not \" \\\n \"sending because requesting client \" \\\n \"disconnected\" % source)\n self.__image_queue.task_done()\n self.__logger.info(\"Image Request Handling Thread ended\")", "def start_processing(self):", "def image_server():\n yield from http_server_thread(ImageHandler)", "def request_file(flags, image_data):\n\n with Image.open(io.BytesIO(image_data)) as img:\n proc_img = process_image_file(flags, img)\n\n return proc_img", "def feed_data(self):\n test = True\n\n sizex_pv = self.detector + ':image1:ArraySize0_RBV'\n sizey_pv = self.detector + ':image1:ArraySize1_RBV'\n acquire_pv_name = self.get_acquire_pv_name()\n while test:\n self.sizex = caget(sizex_pv)\n self.sizey = caget(sizey_pv)\n ack = caget(acquire_pv_name)\n if ack == 1:\n test = False\n self.start_processes()\n else:\n time.sleep(.005)\n\n # # start the infinit loop so the feed does not stop after this init\n # if True:\n # time.sleep(10)\n #\n return caget(acquire_pv_name)", "def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)", "def handle(self, req):\n imAgree = False\n\n self.isReady = False\n self.subCamera = rospy.Subscriber(self.source, Image,\n self.cameraCallback)\n while not self.isReady:\n time.sleep(0.01)\n\n # checking for adequacy\n print(self.list)\n # flag = 0\n # while not imAgree:\n # self.subCamera = None\n # self.measuring = None\n # flag = input('checking: type 1 if all right: ')\n # if flag == 1:\n # imAgree = True\n\n # stop processing image\n self.subCamera = None\n self.measuring = None\n return [self.list]", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def gen(self):\n\n # context = zmq.Context()\n # receiver = context.socket(zmq.PULL)\n 
self.receiver.connect(inference_url())\n\n while self.is_opened:\n ret = self.receiver.recv_pyobj()\n\n nparr = np.frombuffer(np.array(ret['data']), np.uint8)\n\n # logger.warning('Receive: %s', ret['ts'])\n # logger.warning('Time elapsed: %s', (time.time()-self.keep_alive))\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # ret2 = receiver.recv_pyobj()\n # logger.warning(ret2['ts'])\n # logger.warning(ret2['shape'])\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n cv2.imencode('.jpg', img)[1].tobytes() + b'\\r\\n')\n self.receiver.close()", "def process_image(self):\n pass", "def first_streaming_request(self) -> global___Snippet.SimpleRequestInitialization:", "def process(self):\n if not self._requests:\n return\n\n self._processing = True\n Engine.instance().start()", "def run(self):\n while self.running:\n self.handle_request()", "def _start(self):\n\n\t\tr = self.send_request(self._src_url)\n\n\t\tsoup = BeautifulSoup(r.content, \"html.parser\")\n\n\t\tchap = ChapterInfo.from_soup(soup)\n\n\t\tself._title = chap.title\n\n\t\twith tempfile.TemporaryDirectory() as temp_dir:\n\t\t\timage_paths = self._download_images(chap.image_urls, temp_dir)\n\n\t\t\tnum_pages = self._create_pdf(image_paths)\n\n\t\t\tself._percent_saved = num_pages / len(chap.image_urls)", "def input_handler(data, context):\n if context.request_content_type == 'application/x-image':\n payload = data.read()\n\n img = Image.open(io.BytesIO(payload))\n img = img.convert('RGB')\n img = img.resize((IMG_SIZE, IMG_SIZE), Image.NEAREST)\n img_array = image.img_to_array(img)\n img_array = img_array.astype(np.uint8)\n \n img_preprocessed = preprocess_input(img_array)[None, :]\n\n return json.dumps({\"instances\": np.array(img_preprocessed).tolist()})\n else:\n _return_error(415, 'Unsupported content type was \"{}\"'.format(\n context.request_content_type or 'Unknown'))", "def process_samples_in_network(eeg_sender, aud_sender):\n eeg_sender.start_processing()\n aud_sender.start_processing()\n eeg_sender.wait_for_completion()\n aud_sender.wait_for_completion()", "def input_handler(data, context):\n\n if context.request_content_type == \"application/x-image\":\n payload = data.read()\n encoded_image = base64.b64encode(payload).decode(\"utf-8\")\n instance = [{\"b64\": encoded_image}]\n return json.dumps({\"instances\": instance})\n else:\n _return_error(\n 415, 'Unsupported content type \"{}\"'.format(context.request_content_type or \"Unknown\")\n )", "def process_request_starts(self, request):\n pass", "def start(self):\n self._client.predict(\n endpoint=self._endpoint, instances=self._request)\n\n if self._completion_callback:\n if self._query_handle:\n callback_args = [self._query_handle]\n else:\n callback_args = []\n self._completion_callback(*callback_args)", "def start_requests(self):\n requests = self.crawl_s3_bucket()\n\n for request in requests:\n yield request", "def handle_req( self, req ):\n start_time_handle = time.time()\n stamp = req.stamp.data\n\n cv_image = None\n for i in range(3):\n cv_image, fail = self.pop_image_by_timestamp(stamp)\n if cv_image is None and fail == 0:\n rospy.logerr(\"Unable find image swarm loop too slow!\")\n result = WholeImageDescriptorComputeTSResponse()\n return result\n else:\n if fail == 1:\n print(\"Wait 0.02 sec for image come in and re find image\")\n rospy.sleep(0.02)\n cv_image = self.pop_image_by_timestamp(stamp)\n else:\n break\n\n if cv_image is None:\n rospy.logerr(\"Unable to find such image\")\n result = WholeImageDescriptorComputeTSResponse()\n 
return result\n\n\n # print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\\ta=', req.a, '\\tt=', stamp )\n if len(cv_image.shape)==2:\n # print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'\n cv_image = np.expand_dims( cv_image, -1 )\n elif len( cv_image.shape )==3:\n pass\n else:\n assert False\n\n\n assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \\\n \"\\n[whole_image_descriptor_compute_server] Input shape of the image \\\n does not match with the allocated GPU memory. Expecting an input image of \\\n size %dx%dx%d, but received : %s\" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )\n\n ## Compute Descriptor\n start_time = time.time()\n i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]\n print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )\n\n # u = self.model.predict( i__image )\n with self.sess.as_default():\n with self.sess.graph.as_default():\n # u = self.model.predict( i__image )\n u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})\n\n print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )\n # print( '\\tinput_image.shape=', cv_image.shape, )\n # print( '\\tinput_image dtype=', cv_image.dtype )\n # print( tcol.OKBLUE, '\\tinput image (to neuralnet) minmax=', np.min( i__image ), np.max( i__image ), tcol.ENDC )\n # print( '\\tdesc.shape=', u.shape, )\n # print( '\\tdesc minmax=', np.min( u ), np.max( u ), )\n # print( '\\tnorm=', np.linalg.norm(u[0]) )\n # print( '\\tmodel_type=', self.model_type )\n\n\n\n ## Populate output message\n result = WholeImageDescriptorComputeTSResponse()\n # result.desc = [ cv_image.shape[0], cv_image.shape[1] ]\n result.desc = u[0,:]\n result.model_type = self.model_type\n print( '[ProtoBufferModelImageDescriptor Handle Request] Callback returned in %4.4fms' %( 1000. 
*(time.time() - start_time_handle) ) )\n return result", "def serve_inference_requests():\n global image_queue\n\n with tf.Session() as sess:\n while True:\n image_data = image_queue.get()\n\n tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]\n\n human_string = labels[top_k[0]]\n score = predictions[top_k[0]]\n logging.info('%s classified with score %.5f', human_string, score)\n\n emit_image = False\n if human_string != 'nothing':\n emit_image = True\n logging.debug('emitting image cause %s was detected', human_string)\n elif score <= config['inference']['threshold']:\n emit_image = True\n logging.debug('emitting image cause score %.5f is below threshold of %s',\n score, config['inference']['threshold'])\n else:\n logging.debug('image not emitted, cause nothing was detected with a probability of %.5f',\n score)\n\n if emit_image:\n mqtt_publish(image_data)\n else:\n save_image(image_data)", "def _request_fixtures(self, sample_raw_image_longer, sample_raw_image_mask, helpers, plot_img):\n self.sample_raw_image = sample_raw_image_longer\n self.sample_raw_image_mask = sample_raw_image_mask\n self.helpers = helpers\n self.plot_img = plot_img", "async def grab(self):\r\n # TODO probe the system for optimal size\r\n await self.configure_acquisition(100, continuous=True)\r\n\r\n self.start_acquisition()\r\n with trio.CancelScope():\r\n while True:\r\n yield await self.get_image(mode=BufferRetrieveMode.Latest, copy=False)\r\n self.stop_acquisition()\r\n\r\n await self.unconfigure_acquisition()", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def start(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Hi send an image to classify!')", "def run(self):\r\n self.collect_data()", "async def extractimages(self, ctx):\n if self.extract_images_running:\n await ctx.send(inline('Extract images already running'))\n return\n\n event_loop = asyncio.get_event_loop()\n running_load = event_loop.run_in_executor(self.executor, self.do_extract_images)\n\n self.extract_images_running = True\n await ctx.send(inline('Running image extract pipeline: this could take a while'))\n await running_load\n self.extract_images_running = False\n await ctx.send(inline('Image extract finished'))", "def start(self) -> None:\n data = b\"\"\n while True:\n # while loop to get size of receiving data\n while len(data) < self.payload_size:\n packet = self.client_socket.recv(4 * 1024) # 4KB\n if not packet:\n break\n data += packet\n # counting size of sending data\n packed_msg_size = data[: self.payload_size]\n # if in first while loop there was download part of data, need to add it on start\n data = data[self.payload_size :]\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n # receiving concrete data\n while len(data) < msg_size:\n data += self.client_socket.recv(4 * 1024)\n # getting all data for current state\n data_recv_pickled = data[:msg_size]\n # setting data to whats left for next state\n data = data[msg_size:]\n # unpickle what we got\n data_recv = pickle.loads(data_recv_pickled)\n # show image and if q pressed - stop\n cv2.imshow(\"RECEIVING VIDEO\", data_recv.frame)\n print(\n f\"[CLIENT] GOT IMAGE AT TIME: {data_recv.decision} | WITH PERCENTAGE: {data_recv.percentage}% | DELAY: {datetime.datetime.now() - data_recv.time_sended}\"\n )\n key = cv2.waitKey(1) & 0xFF\n if 
key == ord(\"q\"):\n break\n # disconnect from server\n self.disconnect()", "def start_stream(self):\n pass", "def streaming_request(self) -> global___Snippet.SimpleRequestInitialization:", "def stream_mock_data(port, queue):\n img_files = [os.path.join(MOCK_DATA_PATH, f) for f in os.listdir(MOCK_DATA_PATH)\n if os.path.isfile(os.path.join(MOCK_DATA_PATH, f)) and not f.startswith('.')]\n cur = 0\n while True:\n data = Image.open(img_files[cur])\n yield queue.append(Item(data, datetime.now()))\n logging.info('{}: queue length {}'.format(port, len(queue)))\n yield gen.sleep(random()/5)\n cur = (cur + 1) % len(img_files)", "def __on_pre_processing_images_started(self):\n\n self.progress_window.show_pre_process_images_animation()", "def image_fetcher(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)", "def image_fetcher_depricated(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/image/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)", "def CollectImages(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def start(self):\n\t\tself.stream.start_stream()", "def start(self):\n\t\twhile self.capture_status:\n\t\t\t_, frame = self.cap.read()\n\t\t\tc_frame = frame[self.width / 2 - self.face_width / 2: self.width / 2 + self.face_width / 2,\n\t\t\t self.height / 2 - self.face_width / 2: self.height / 2 + self.face_height / 2, :]\n\t\t\tif not self.in_processing:\n\t\t\t\tself.frame = frame\n\t\t\t\tself.in_processing = True\n\t\t\tsleep(0.2)\n\t\tyield cv2.imdecode('png', c_frame)", "def worker():\n while True:\n image, objects = queue.get()\n\n # Call on_image Event Function\n self.on_image(image)\n\n # Call on_image Callback Functions\n for callback in self.on_image_callbacks:\n callback(image)\n\n if objects:\n # Call on_object Event Function\n self.on_object(image, objects)\n\n # Call on_object Callback Functions\n for callback in self.on_object_callbacks:\n callback(image, objects)", "def run(self):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n data = self._request.getData()\n pyhttp = self._request.getHttp()\n config = self._request.getConfiguration()\n\n # allow anyone else to handle the request at this point\n handled = tools.run_callback(\"handle\", \n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n if not handled == 1:\n blosxom_handler(self._request)\n\n # do end callback\n tools.run_callback(\"end\", {'request': self._request})", "def process(self, image):", "async def batched_generate_handler(self, prompts: List[str]):\n print(\"Number of input prompts: \", len(prompts))\n num_to_pad = _MAX_BATCH_SIZE - len(prompts)\n prompts += [\"Scratch request\"] * num_to_pad\n\n images = self.generate_tpu(prompts)\n results = []\n for image in images[: _MAX_BATCH_SIZE - num_to_pad]:\n file_stream = BytesIO()\n image.save(file_stream, \"PNG\")\n results.append(\n Response(content=file_stream.getvalue(), media_type=\"image/png\")\n )\n return results", "def pipeline(self):\n\n self._get_data()\n self._upload_to_raw()", "def __init__(self):\n self.image_subscriber = rospy.Subscriber('/raspicam_node/image/compressed', CompressedImage, self.imageCallback)\n print 'Waiting for 
classifier service to come up...'\n rospy.wait_for_service('/classifier_node/classify')\n self.classify_client = rospy.ServiceProxy('/classifier_node/classify', Classify)", "def _initJobs(self):\n super(DigestManager, self)._initJobs()\n conf = self.config.container_manager\n\n job4 = LoopingCall(self.performRequestedScan)\n job4.start(float(conf.activescan_interval))\n self.jobs.append(job4)", "def startRep(self, rep):\n \n pass", "def on_start(self):\r\n self.start_whole_exposure()", "def start_requests(self):\n yield Request(self.base_url, \n callback=self.parse_urls)", "def getCamera1():\n for msg in camera1:\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpg\\r\\n\\r\\n' + base64.b64decode(msg.value['image_bytes']) + b'\\r\\n\\r\\n')", "def gen_livestream():\n\n flag = True\n frame = _dog()\n while True:\n time.sleep(0.02)\n if app.images.qsize():\n image = app.images.get()\n if flag:\n image = base64_to_cv2(image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detector = dlib.get_frontal_face_detector()\n rects = detector(gray, 0)\n for (i, rect) in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n \n for (x, y) in shape:\n cv2.circle(image, (x, y), 2, (0, 255, 0), -1)\n _, frame = cv2.imencode('.jpg', image)\n else:\n frame = _dog()\n # print(position)\n flag = not flag\n # yield ('Content-Type: image/jpeg\\r\\n\\r\\n' + base64.b64encode(frame).decode(\"utf-8\") + '\\r\\n')\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def process_image():\n global last_frame, is_streaming\n i=0\n\n imgproc = ImgProc()\n while(True):\n if last_frame is not None and is_streaming:\n time.sleep(0.1)\n\n print(\"Processing frame \", i)\n imgproc.detect_object(last_frame, i)\n print(\"Processing complete \", i)\n i+=1", "def __init__(self, start_iter):\n super().__init__(start_iter)\n self.imgs = {}\n self.is_new = False", "def run(self):\n self._connection = self.open_connection()\n self._connection.ioloop.start()", "def processRequest():\n # if we are not in the address list, then this is not an initialized connection\n if request.remote_addr not in addressList:\n # if the address is not in the list and it is not a market\n # request, then it is web gallery traffic\n if not urlEncode.isMarket(request.url):\n sendToImageGallery(request)\n return\n # if this is a market request, then proceed with new session initialization\n else:\n encoded = {'url':request.url, 'cookie':[]}\n decoded = urlEncode.decode(encoded)\n sender, receiver = frame.initServerConnection(decoded, PASSWORDS, callback)\n # if the client sent a bad password, print an error message\n # and return an empty image\n if sender == False:\n print \"Bad password entered\"\n return sendToImageGallery(request)\n # Note: this will need to change to accomodate multiple client sessions\n htptObject.assembler = sender\n htptObject.disassembler = receiver\n addressList.append(request.remote_addr)\n #send back a blank image with the new session id\n framed = htptObject.assembler.assemble('')\n image = imageEncode.encode(framed, 'png')\n return serveImage(image)\n #TODO\n #setup some way to maintain a single Internet connection per client\n # if this is an initialized client, then receive the data and see\n # if we have anything to send\n else:\n #receive the data\n decoded = urlEncode.decode({'url':request.url, 'cookie':request.cookies})\n htptObject.disassembler.disassemble(decoded)\n # see if we have any data to return\n readyToRead, readyToWrite, 
inError = \\\n select.select([htptObject.torSock], [], [], 0)\n # if we have received data from the Tor network for the Tor\n # client, then send it\n if readyToRead != []:\n # get up to a megabyte\n dataToSend = readyToRead[0].recv(1024*1000)\n# print \"Server Sending: {}\".format(dataToSend)\n else:\n dataToSend = ''\n # put the headers on the data (not the actual function name)\n framed = htptObject.assembler.assemble(dataToSend)\n # encode the data\n encoded = imageEncode.encode(framed, 'png')\n # send the data with apache\n return serveImage(encoded)", "def request_image(self, source, connection):\n try:\n self.__image_queue.put_nowait((source, connection))\n return True\n except Queue.Full:\n return False", "def run(self):\n self.started()", "def process(image):\n pass", "def start_requests(self):\n url = self.start_urls[0]\n yield scrapy.Request(url=url, callback=self.parse)", "def master():\n init = Initializer.create_init()\n while True:\n # current frame\n ret, frame = 'unknown', np.random.rand(224, 224, 3) * 255\n frame = frame.astype(dtype=np.uint8)\n Thread(target=send_request, args=(frame.tobytes(), 'block12345', 'initial')).start()\n time.sleep(1)", "def start_requests(self):\r\n yield Request(url=MooreSpider.start_url,\r\n callback=self.parse_directory_list,\r\n method=\"GET\")", "def request_start(self, req):\n log.info(\"Received start request\")\n if not self._configured:\n msg = \"FITS interface server is not configured\"\n log.error(msg)\n return (\"fail\", msg)\n try:\n fw_socket = self._fw_connection_manager.get_transmit_socket()\n except Exception as error:\n log.exception(str(error))\n return (\"fail\", str(error))\n log.info(\"Starting FITS interface capture\")\n self._stop_capture()\n buffer_size = 4 * (self.nchannels + 2)\n handler = R2SpectrometerHandler(2, self.nchannels,\n self.integration_time,\n self.nblank_phases,\n fw_socket)\n self._capture_thread = CaptureData(self._capture_interface,\n self._capture_port,\n buffer_size,\n handler)\n self._capture_thread.start()\n return (\"ok\",)", "def run(self):\n ioloop.IOLoop.current().start()", "def input_processing(url):\n try:\n response = requests.get(url)\n img_array = (Image.open(BytesIO(response.content)).convert('L')).resize((400, 400))\n img_array = np.array(img_array)\n except Exception as exception_type:\n print(exception_type)\n empty_img = Image.new('L', (400, 400))\n img_array = empty_img.resize((400, 400))\n img_array = np.array(img_array)\n\n return img_array", "def init(self):\n\t\tsp_addcallback(self.sp_callback)\n\t\tself.downloader.start()", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def search():\n def eventStream():\n while True:\n try:\n data = q.get(timeout=0.5)\n except:\n data = {'msg': 'Cloud Run cold start'}\n yield \"event: images\\ndata: {}\\n\\n\".format(json.dumps(data))\n return Response(eventStream(), mimetype=\"text/event-stream\")", "def run(self):\n for transis_response in self.transis_consumer.get_detector_counts():\n self.di_framework_client.start_job()\n response = self.push_transis_response_to_kinesis(transis_response, self.di_framework_client)\n log.info(response)\n self.di_framework_client.log_job_status(json.dumps(response))\n self.di_framework_client.end_job()", "def on_iteration(self):\n self.send_pending_requests()\n super().on_iteration()", "def do_all(self):\r\n self.frame_gen.start()\r\n\r\n while True:\r\n msg = 
self.rec_queue.get()\r\n if msg[0] == 'sync':\r\n self.send_queue.put(('sync', time.time()))\r\n continue\r\n if msg[0] == 'finish':\r\n break\r\n if msg[0] != 'img':\r\n raise ValueError(f'strange msg: {msg}')\r\n\r\n frame_num = msg[1]\r\n time_ms = self.ms_per_frame * frame_num\r\n rawimg = self.frame_gen.generate_at(time_ms)\r\n self.img_queue.put((frame_num, rawimg))\r\n self.send_queue.put(('post', frame_num))\r\n rawimg = None\r\n\r\n self.frame_gen.finish()\r\n\r\n self.img_queue.close()\r\n self.rec_queue.close()\r\n self.send_queue.close()", "def start(self):\n self.ioloop.add_callback(self.get_data)\n logging.info(\"[DataSource] Started\")", "async def _async_request_image(\n self, request_method: Callable[[], Coroutine[Any, Any, None]]\n ) -> bytes | None:\n if not self.available:\n return None\n image_future = self._loop.create_future()\n self._image_futures.append(image_future)\n await request_method()\n if not await image_future:\n return None\n return self._state.data", "def run(self):\n self.arbiter.start()", "def process_single(self, ims):\n version, size, endpoint, method = Requester.sniff(ims)\n methodkey = (endpoint, method)\n resource = self.RESOURCES.get(methodkey)\n if resource is None:\n raise EndpointError(f\"Couldn't find the endpoint/method to execute from {methodkey}.\")\n\n R = resource(self.dhtdoer, self)\n R.parse(ims, size, version)\n R.execute()\n del ims[:PREFIX_SIZE+size]", "def run(self):\n\n # TODO: Logic to get data, enforce request limits, and filter out duplicates", "def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)", "def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)", "def start_requests(self):\n authors_pandas = conf.read_from_data('authors.json')\n author_link_list = list(\n map(lambda obj: (obj['keyUrl'], conf.gd_base_url + obj['article_url'], obj['article_url']),\n authors_pandas))\n for link in author_link_list:\n yield Request(url=link[1])", "def run(self):\n while self.container.process(): pass", "def start():", "def start():", "def start():", "def start():", "def gen():\n while True:\n retval, frame = vc.read()\n\n if retval:\n #image_processing(frame)\n frame = cv2.imencode('.jpg', frame)[1].tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def start_requests(self):\n yield scrapy.Request(url=self.start_urls[0])", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = 
self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def __init__(self, do, token, url, agent):\n super(Image, self).__init__(token, agent)\n self.do = do\n self.uri = \"%s/images\" % url", "def __call__(self):\n if grinder.runNumber == 0: self.initialSleep()\n (param1, param2) = self.getParam()\n self.request1(param1, param2)", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def on_process_image(self, img, prefix):\n\t\traise NotImplementedError(\"You need to implement this to tweet to timeline (or pass if you don't want to)!\")", "def start_reader(self):\n # if already started, return immediately\n if self.running:\n return\n\n # construct a new reader & start it\n self.reader = threading.Thread(target = self.read_data)\n self.reader.start()", "def __init__(__self__,\n resource_name: str,\n args: StreamingImageArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _process_request(self):\n if not self._requests:\n if self._stream:\n self._stream.close()\n self._stream = None\n if self._processing:\n self._processing = False\n Engine.instance().stop()\n return\n\n request = self._requests[0]\n\n request.append(\n Engine.instance().defer(request[5], self._request_timeout, request))\n\n port = request[2].port\n if not port:\n if request[2].scheme.lower() == 'https':\n port = 443\n else:\n port = 80\n\n host = \"%s:%d\" % (request[2].hostname, port)\n\n if self._stream:\n if not self._server == host.lower() or not \\\n self._is_secure == (request[2].scheme.lower() == 'https'):\n self._stream.end()\n return\n\n if not self._stream:\n # Store the current server.\n self._server = host.lower()\n\n # Create a Stream, hook into it, and connect.\n self._stream = Stream()\n\n self._stream.on_close = self._on_close\n self._stream.on_connect = self._on_connect\n\n self._is_secure = request[2].scheme.lower() == 'https'\n if self._is_secure:\n raise Exception(\"SSL has not yet been implemented in this version of Pants.\")\n self._stream.startTLS()\n\n self._stream.connect((request[2].hostname, port))\n return\n\n # If we got here, we're connected, and to the right server. 
Do stuff.\n self._stream.write('%s %s HTTP/1.1%s' % (request[0], request[8], CRLF))\n for k, v in request[3].iteritems():\n self._stream.write('%s: %s%s' % (k, v, CRLF))\n\n if request[4]:\n self._stream.write('%s%s' % (CRLF, request[4]))\n else:\n self._stream.write(CRLF)\n\n # Now, wait for a response.\n self._stream.on_read = self._read_headers\n self._stream.read_delimiter = DOUBLE_CRLF", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))" ]
[ "0.63510275", "0.6212329", "0.60436887", "0.597931", "0.593665", "0.58673537", "0.57487386", "0.57463884", "0.5703741", "0.56677437", "0.5652428", "0.5626479", "0.56006426", "0.55784166", "0.55780387", "0.5559267", "0.554615", "0.5536241", "0.5526545", "0.55169946", "0.5499773", "0.5490339", "0.54874617", "0.5480384", "0.5472363", "0.5472363", "0.5472363", "0.546315", "0.5449545", "0.54179215", "0.5406911", "0.540556", "0.5403532", "0.53940403", "0.53709143", "0.53642374", "0.5358591", "0.53396535", "0.53338796", "0.53324175", "0.53309333", "0.5325785", "0.53215665", "0.53201306", "0.53062814", "0.5298434", "0.52962685", "0.52927333", "0.52879435", "0.52877605", "0.5273069", "0.5272424", "0.5261215", "0.52602875", "0.52527374", "0.5252578", "0.5252348", "0.5244921", "0.52378756", "0.5214184", "0.52060974", "0.5202691", "0.5198686", "0.51764244", "0.51749367", "0.517205", "0.51694816", "0.51641035", "0.5161599", "0.5153645", "0.5152011", "0.5144453", "0.51239735", "0.51220053", "0.51153225", "0.51151735", "0.511138", "0.511138", "0.51113236", "0.51076263", "0.51043594", "0.51043594", "0.51043594", "0.51043594", "0.5100037", "0.5090645", "0.50897086", "0.50848395", "0.50848395", "0.50848395", "0.50848395", "0.50848395", "0.50848395", "0.5083263", "0.5078432", "0.5078423", "0.5073881", "0.5071712", "0.50700134", "0.50691265", "0.50637656" ]
0.0
-1
Serializer used by the Producer Service to send class_label to subscribers. class_label is generated by inferring on image using pretrained lenet
def kafka_serializer(data): return json.dumps(data).encode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_voc_label(is_training):\n voc_dir = config.voc_dir\n cls_map = {name: i for i, name in enumerate(config.coco_classes)}\n sub_dir = 'train' if is_training else 'eval'\n voc_dir = os.path.join(voc_dir, sub_dir)\n if not os.path.isdir(voc_dir):\n raise ValueError(f'Cannot find {sub_dir} dataset path.')\n\n image_dir = anno_dir = voc_dir\n if os.path.isdir(os.path.join(voc_dir, 'Images')):\n image_dir = os.path.join(voc_dir, 'Images')\n if os.path.isdir(os.path.join(voc_dir, 'Annotations')):\n anno_dir = os.path.join(voc_dir, 'Annotations')\n\n if not is_training:\n data_dir = config.voc_root\n json_file = os.path.join(data_dir, config.instances_set.format(sub_dir))\n file_dir = os.path.split(json_file)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [],\n \"categories\": []}\n bnd_id = 1\n\n image_files_dict = {}\n image_anno_dict = {}\n images = []\n for anno_file in os.listdir(anno_dir):\n print(anno_file)\n if not anno_file.endswith('xml'):\n continue\n tree = et.parse(os.path.join(anno_dir, anno_file))\n root_node = tree.getroot()\n file_name = root_node.find('filename').text\n img_id = get_imageId_from_fileName(file_name)\n image_path = os.path.join(image_dir, file_name)\n print(image_path)\n if not os.path.isfile(image_path):\n print(f'Cannot find image {file_name} according to annotations.')\n continue\n\n labels = []\n for obj in root_node.iter('object'):\n cls_name = obj.find('name').text\n if cls_name not in cls_map:\n print(f'Label \"{cls_name}\" not in \"{config.coco_classes}\"')\n continue\n bnd_box = obj.find('bndbox')\n x_min = int(float(bnd_box.find('xmin').text)) - 1\n y_min = int(float(bnd_box.find('ymin').text)) - 1\n x_max = int(float(bnd_box.find('xmax').text)) - 1\n y_max = int(float(bnd_box.find('ymax').text)) - 1\n labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])\n\n if not is_training:\n o_width = abs(x_max - x_min)\n o_height = abs(y_max - y_min)\n ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': \\\n img_id, 'bbox': [x_min, y_min, o_width, o_height], \\\n 'category_id': cls_map[cls_name], 'id': bnd_id, \\\n 'ignore': 0, \\\n 'segmentation': []}\n json_dict['annotations'].append(ann)\n bnd_id = bnd_id + 1\n\n if labels:\n images.append(img_id)\n image_files_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(labels)\n\n if not is_training:\n size = root_node.find(\"size\")\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n image = {'file_name': file_name, 'height': height, 'width': width,\n 'id': img_id}\n json_dict['images'].append(image)\n\n if not is_training:\n for cls_name, cid in cls_map.items():\n cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}\n json_dict['categories'].append(cat)\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\n return images, image_files_dict, image_anno_dict", "def get_encoder_class(self,label):\n return len(self.encodeDict[label].classes_)", "def on_label(self, payload):\n pass", "def predict_class(self, original_image_numpy: np.ndarray) -> None:\n from app.dl_model.image import ClassifierInput\n # scale up coordinates\n self.scale_up_coordinates()\n x1, y1, x2, y2 = [int(coord) for coord in self.scale_coordinates.round()]\n # crop original numpy image\n numpy_image = original_image_numpy[y1:y2, x1:x2, :].copy()\n # create classifier input object\n classifier_input = ClassifierInput(numpy_image, 
new_shape=(224, 224))\n # classify input\n prediction = classifier_input.predict_class()\n # set attributes\n self.class_name = prediction.class_name # update class_name\n self.conf = prediction.conf # update probability\n self.product_id = prediction.product_id # set product external id\n self.detection_index = prediction.detection_index # set detection index\n self.top_k_names = prediction.top_k_names # set top k names list\n self.top_k_indices = prediction.top_k_indices # set top k detection index\n self.top_k_confidences = prediction.top_k_confidences # set top k confidieces values\n self.top_k_product_ids = prediction.top_k_product_ids # set top k product external ids", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def save(self, uri: str, class_config: 'ClassConfig',\n crs_transformer: 'CRSTransformer') -> None:\n from rastervision.core.data import ObjectDetectionGeoJSONStore\n\n label_store = ObjectDetectionGeoJSONStore(\n uri=uri,\n class_config=class_config,\n crs_transformer=crs_transformer)\n label_store.save(self)", "def encode_label(label: np.array, nb_classes: int):\n encoded = np.zeros(nb_classes)\n encoded[int(label)] = 1.\n return encoded", "def label_transformation(self, ind_task, label):\n\n # if self.disjoint class 0 of second task become class 10, class 1 -> class 11, ...\n if self.disjoint_classes:\n label = label + self.num_classes * ind_task\n\n return label", "def get_classification(self, image):\n # Image pre-processing pipeline\n img = cv2.resize(image, None, fx=0.5, fy=0.5)\n img = img.astype(np.float32)\n img = keras.applications.vgg16.preprocess_input(img)\n # Execute prediction\n probs = self.model.predict(np.array([img]), batch_size=1, verbose=1)[0]\n # get label with max probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n label = self.predictionary[g_x]\n rospy.loginfo(\"label: %d, conf: %f, %f, %f, %f\", g_x, probs[0], probs[1], probs[2], probs[3])\n return label", "def regress_by_class(self, rois, label, bbox_pred, img_meta):\n assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)\n\n if not self.reg_class_agnostic:\n label = label * 4\n inds = torch.stack((label, label + 1, label + 2, label + 3), 1)\n bbox_pred = torch.gather(bbox_pred, 1, inds)\n assert bbox_pred.size(1) == 4\n\n if rois.size(1) == 4:\n new_rois = self.bbox_coder.decode(\n rois, bbox_pred, max_shape=img_meta['img_shape'])\n else:\n bboxes = self.bbox_coder.decode(\n rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape'])\n new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n return new_rois", "def _classify(self, example):\n neighbors = self.find_neighbor(example)\n class_label = self.find_response(neighbors)\n return class_label", "def publish_response(class_label):\n client = KProducer(config=publisher_config)\n client.produce(class_label, PUBLISHER_TOPIC)", "def __create_label_file(self, species_list: List[str]) -> None:\n\n nips4bplus_filtered_audio_folder = self.file_manager.data_folder(\"nips4bplus_filtered\", \"audio\")\n nips4bplus_audio_folder = self.file_manager.data_folder(\"nips4bplus\", \"audio\")\n\n nips4b_species_list = self.download_nips4b_species_list()\n\n nips4bplus_selected_labels = []\n nips4bplus_labels = []\n\n species_to_sound_types = self._parse_species_list(species_list, {\"song\", \"call\"})\n\n for file in os.listdir(self.extracted_nips_annotations_folder):\n label_file_path = 
os.path.join(self.extracted_nips_annotations_folder, file)\n\n def map_class_names(row):\n if row[\"label\"] in ('Unknown', 'Human'):\n return \"noise\"\n\n nips4b_class_name = nips4b_species_list[nips4b_species_list[\"nips4b_class_name\"] == row[\"label\"]]\n scientific_n = nips4b_class_name[\"Scientific_name\"].item()\n sound_t = nips4b_class_name[\"sound_type\"].item()\n\n if len(nips4b_class_name) != 1:\n raise NameError(f\"No unique label found for class {row['label']}\")\n\n if scientific_n not in species_to_sound_types or sound_t not in species_to_sound_types[scientific_n]:\n return \"noise\"\n else:\n return nips4b_class_name[\"class name\"].item()\n\n if file.endswith(\".csv\"):\n try:\n labels = pd.read_csv(label_file_path, names=[\"start\", \"duration\", \"label\"])\n labels[\"label\"] = labels.apply(map_class_names, axis=1)\n except pd.errors.EmptyDataError:\n labels = pd.DataFrame([0, 5, \"noise\"], columns=[\"start\", \"duration\", \"label\"])\n\n file_id = file.lstrip(\"annotation_train\").rstrip(\".csv\")\n\n labels[\"id\"] = f\"nips4b_birds_trainfile{file_id}\"\n labels[\"file_path\"] = f\"nips4b_birds_trainfile{file_id}.wav\"\n labels[\"start\"] = labels[\"start\"] * 1000\n labels[\"end\"] = labels[\"start\"] + labels[\"duration\"] * 1000\n\n contains_selected_species = False\n for idx, label in labels.iterrows():\n class_name = nips4b_species_list[nips4b_species_list[\"class name\"] == label[\"label\"]]\n\n if label[\"label\"] != \"noise\" and class_name[\"Scientific_name\"].item() in species_to_sound_types:\n contains_selected_species = True\n if contains_selected_species:\n nips4bplus_selected_labels.append(labels)\n\n labels = labels[[\"id\", \"file_path\", \"start\", \"end\", \"label\"]]\n\n self.append = nips4bplus_labels.append(labels)\n\n nips4bplus_labels = pd.concat(nips4bplus_labels)\n self._save_label_file(nips4bplus_labels, \"nips4bplus\")\n if len(nips4bplus_selected_labels) > 0:\n nips4bplus_selected_labels = pd.concat(nips4bplus_selected_labels)\n else:\n nips4bplus_selected_labels = pd.DataFrame(columns=[\"id\", \"file_path\", \"label\", \"start\", \"end\"])\n\n self._save_label_file(nips4bplus_selected_labels, \"nips4bplus_filtered\")\n\n for dataset in [\"train\", \"test\"]:\n folder_path = os.path.join(self.extracted_nips_audio_folder, dataset)\n FileManager.copytree(folder_path, nips4bplus_filtered_audio_folder)\n FileManager.copytree(folder_path, nips4bplus_audio_folder)\n\n # remove audio files without labels\n for file in os.listdir(nips4bplus_filtered_audio_folder):\n if nips4bplus_selected_labels[nips4bplus_selected_labels[\"file_path\"] == file].empty:\n os.remove(os.path.join(nips4bplus_filtered_audio_folder, file))\n for file in os.listdir(nips4bplus_audio_folder):\n if nips4bplus_labels[nips4bplus_labels[\"file_path\"] == file].empty:\n os.remove(os.path.join(nips4bplus_audio_folder, file))", "def serialize_sample(img, label):\n # Create a dictionary mapping the feature name to the tf.train.Example-compatible data type.\n features = {\n 'img': _bytes_feature(tf.io.serialize_tensor(img)),\n 'label': _bytes_feature(tf.io.serialize_tensor(label)),\n }\n # Create a Features message using tf.train.Example\n sample = tf.train.Example(features=tf.train.Features(feature=features))\n return sample.SerializeToString()", "def encode_label(self, label: str) -> int:\n return self.class_map[label]", "def prepare_label(input_batch, new_size, num_classes, one_hot=True, task='seg'):\n with tf.name_scope('label_encode'):\n input_batch = 
tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.\n if task == 'seg':\n input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.\n if one_hot:\n input_batch = tf.one_hot(input_batch, depth=num_classes)\n return input_batch", "def test_labels_encoder_no_classes(self):\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB(), encoder=L2UTransformer())\n with pytest.warns(YellowbrickWarning, match=\"could not determine class labels\"):\n assert oz._labels() is None", "def process_classified_message(self, message, classification): \n pass", "def get_classLabel(self, dataset, class_label): \n\t\tnode = self.root\n\t\tbroken=0\n\t\t\n\t\t#print(\"BEBE:\" + str(node.get_bebe( dataset)))\n\t\t\n\t\tif (node.get_bebe( dataset) == class_label ):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\t\t\tdef junk(data, class_label, seed, ratio):", "def _serialize_example(self, id: str, image: PIL.Image.Image, **kwargs) -> str:\n # Create a dictionary mapping the feature name to the tf.Example-compatible\n # data type.\n height = image.height\n width = image.width\n depth = len(image.getbands())\n image_bytes = image.tobytes()\n\n feature = {\n 'id': utils.bytes_feature(tf.compat.as_bytes(id)),\n 'image_raw': utils.bytes_feature(image_bytes),\n 'height': utils.int64_feature(height),\n 'width': utils.int64_feature(width),\n 'depth': utils.int64_feature(depth),\n }\n\n if \"label\" in kwargs:\n feature[\"label\"] = utils.bytes_feature(tf.compat.as_bytes(kwargs[\"label\"]))\n\n for key in set(kwargs.keys()).difference({'label'}):\n value = kwargs[key]\n if isinstance(value, int) or isinstance(value, bool):\n feature[key] = utils.int64_feature(value)\n elif isinstance(value, str):\n feature[key] = utils.bytes_feature(tf.compat.as_bytes(value))\n elif isinstance(value, float):\n feature[key] = utils.float_feature(value)\n\n # Create a Features message using tf.train.Example.\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "def encodeToLabels(self, gt_instances):\n raw_boxes_xyzwhd = np.zeros((self.config_data[\"max_boxes_per_frame\"], 7))\n ### initialize gronud truth labels as np.zeors ###\n gt_labels = np.zeros(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes)] + \\\n [len(self.config_data[\"all_classes\"]) + 7])\n\n ### start transferring box to ground turth label format ###\n for i in range(len(gt_instances[\"classes\"])):\n if i > self.config_data[\"max_boxes_per_frame\"]:\n continue\n class_name = gt_instances[\"classes\"][i]\n box_xyzwhd = gt_instances[\"boxes\"][i]\n class_id = self.config_data[\"all_classes\"].index(class_name)\n if i < self.config_data[\"max_boxes_per_frame\"]:\n raw_boxes_xyzwhd[i, :6] = box_xyzwhd\n raw_boxes_xyzwhd[i, 6] = class_id\n class_onehot = helper.smoothOnehot(class_id, len(self.config_data[\"all_classes\"]))\n \n exist_positive = False\n\n grid_strid = self.grid_strides\n anchor_stage = self.anchor_boxes\n box_xyzwhd_scaled = box_xyzwhd[np.newaxis, :].astype(np.float32)\n box_xyzwhd_scaled[:, :3] /= grid_strid\n anchorstage_xyzwhd = np.zeros([len(anchor_stage), 6])\n anchorstage_xyzwhd[:, :3] = np.floor(box_xyzwhd_scaled[:, :3]) + 0.5\n anchorstage_xyzwhd[:, 3:] = anchor_stage.astype(np.float32)\n\n iou_scaled = helper.iou3d(box_xyzwhd_scaled, anchorstage_xyzwhd, \\\n self.input_size)\n ### 
NOTE: 0.3 is from YOLOv4, maybe this should be different here ###\n ### it means, as long as iou is over 0.3 with an anchor, the anchor\n ### should be taken into consideration as a ground truth label\n iou_mask = iou_scaled > 0.3\n\n if np.any(iou_mask):\n xind, yind, zind = np.floor(np.squeeze(box_xyzwhd_scaled)[:3]).\\\n astype(np.int32)\n ### TODO: consider changing the box to raw yolohead output format ###\n gt_labels[xind, yind, zind, iou_mask, 0:6] = box_xyzwhd\n gt_labels[xind, yind, zind, iou_mask, 6:7] = 1.\n gt_labels[xind, yind, zind, iou_mask, 7:] = class_onehot\n exist_positive = True\n\n if not exist_positive:\n ### NOTE: this is the normal one ###\n ### it means take the anchor box with maximum iou to the raw\n ### box as the ground truth label\n anchor_ind = np.argmax(iou_scaled)\n xind, yind, zind = np.floor(np.squeeze(box_xyzwhd_scaled)[:3]).\\\n astype(np.int32)\n gt_labels[xind, yind, zind, anchor_ind, 0:6] = box_xyzwhd\n gt_labels[xind, yind, zind, anchor_ind, 6:7] = 1.\n gt_labels[xind, yind, zind, anchor_ind, 7:] = class_onehot\n\n has_label = False\n for label_stage in gt_labels:\n if label_stage.max() != 0:\n has_label = True\n gt_labels = [np.where(gt_i == 0, 1e-16, gt_i) for gt_i in gt_labels]\n return gt_labels, has_label, raw_boxes_xyzwhd", "def create_label_map(self, outpath):\n cnt = 1\n with open(outpath, 'w') as fp:\n for itm in self.clazzes:\n fp.write('item {\\n')\n fp.write('\\tname: \"{}\"\\n'.format(itm))\n fp.write('\\tid: {}\\n'.format(cnt))\n fp.write('}\\n')\n cnt += 1", "def create_label(self, loaded_img, loaded_label):\n _, label = cv2.threshold(loaded_label, 120, 255, cv2.THRESH_BINARY)\n kernel = np.ones((5, 5), np.uint8)\n label = cv2.dilate(label, kernel, iterations=1)\n _, contours, _ = cv2.findContours(label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if contours:\n areas = [cv2.contourArea(cnt) for cnt in contours]\n x, y, w, h = cv2.boundingRect(contours[np.argmax(areas)])\n label = label[y:y + h, x:x + w]\n return loaded_img.astype(np.float32) / 255, cv2.resize(label, (self.label_w, self.label_h)).astype(np.float32) / 255\n else:\n return loaded_img.astype(np.float32) / 255, np.zeros([self.label_h, self.label_w], dtype=np.float32)", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def get_labels(self, labels_from_json):\n self.raw_labels = labels_from_json", "def post_label():\n label_id = dao.set_label(id=str(uuid.uuid4()),\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def convert_pickle_to_tfrecord(input_files, output_file):\n print('Generating %s' % output_file)\n with tf.python_io.TFRecordWriter(output_file) as writer:\n # draw 10 random number for getting 10 random classes from Imagenet (fixed value for reproducibility)\n # class_id = [145, 153, 289, 404, 405, 510, 805, 817, 867, 950] # random.sample(range(0, 999), 10)\n # class_id = [153, 156, 161, 174, 197, 207, 215, 216, 218, 224, 227, 230, 236, 254, 260] # 15 dog classes (also used in DAC)\n\n # count = np.zeros(shape=len(class_id))\n for input_file in input_files:\n data_dict = read_pickle_from_file(input_file)\n data = data_dict['data']\n mean_img = data_dict['mean']\n labels = data_dict['labels']\n # Labels are indexed from 1, shift it so that indexes start at 0 (imagenet)\n labels = [i - 1 for i in labels]\n\n num_entries_in_batch = len(labels)\n print('Converting %s' % input_file)\n for i in range(num_entries_in_batch):\n # if 
labels[i] in class_id:\n # labels[i] = class_id.index(labels[i]) # put the labels into the range of 0 to no. clusters\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'height': _int64_feature(64),\n 'width': _int64_feature(64),\n 'depth': _int64_feature(3),\n 'image': _bytes_feature(data[i].tobytes()),\n 'mean_img': _bytes_feature(mean_img.tobytes()),\n 'label': _int64_feature(labels[i])\n }))\n writer.write(example.SerializeToString())\n # count[labels[i]] += 1 # count number of samples per class\n # for idx, num in enumerate(count):\n # print('Number of samples of class %d: %d' % (idx, num))\n # print('Total Number of samples %d' % np.sum(count))", "def labels_to_labels(class_labels, num_classes =4):\n levels = []\n for label in class_labels:\n levels_from_label = label_to_levels(int(label), num_classes=num_classes)\n levels.append(levels_from_label)\n return torch.stack(levels).cuda()", "def label_names(self) -> Strings:\n\n try:\n if self._le:\n return self._le.classes_.tolist()\n except AttributeError:\n self.logger.warning('AttributeError: LabelEncoder was not found.')\n self.logger.warning('No LabelEncoder. Please call label_encoder first.')\n return None", "def __init__(self, load_instance_masks=False, label_map_proto_file=None, use_display_name=False):\n self.keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/key/sha256':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/source_id':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, 1),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, 1),\n # Object boxes and classes.\n 'image/object/bbox/xmin':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(tf.float32),\n 'image/object/class/label':\n tf.VarLenFeature(tf.int64),\n 'image/object/class/text':\n tf.VarLenFeature(tf.string),\n 'image/object/area':\n tf.VarLenFeature(tf.float32),\n 'image/object/is_crowd':\n tf.VarLenFeature(tf.int64),\n 'image/object/difficult':\n tf.VarLenFeature(tf.int64),\n 'image/object/group_of':\n tf.VarLenFeature(tf.int64),\n }\n self.items_to_handlers = {\n fields.InputDataFields.image: slim_example_decoder.Image(\n image_key='image/encoded', format_key='image/format', channels=3),\n fields.InputDataFields.source_id: (\n slim_example_decoder.Tensor('image/source_id')),\n fields.InputDataFields.key: (\n slim_example_decoder.Tensor('image/key/sha256')),\n fields.InputDataFields.filename: (\n slim_example_decoder.Tensor('image/filename')),\n # Object boxes and classes.\n fields.InputDataFields.groundtruth_boxes: (\n slim_example_decoder.BoundingBox(\n ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/')),\n fields.InputDataFields.groundtruth_area: slim_example_decoder.Tensor(\n 'image/object/area'),\n fields.InputDataFields.groundtruth_is_crowd: (\n slim_example_decoder.Tensor('image/object/is_crowd')),\n fields.InputDataFields.groundtruth_difficult: (\n slim_example_decoder.Tensor('image/object/difficult')),\n fields.InputDataFields.groundtruth_group_of: (\n slim_example_decoder.Tensor('image/object/group_of'))\n }\n if load_instance_masks:\n self.keys_to_features['image/object/mask'] = 
tf.VarLenFeature(tf.float32)\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_instance_masks] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/object/mask', 'image/height', 'image/width'],\n self._reshape_instance_masks))\n # TODO: Add label_handler that decodes from 'image/object/class/text'\n # primarily after the recent tf.contrib.slim changes make into a release\n # supported by cloudml.\n label_handler = slim_example_decoder.Tensor('image/object/class/label')\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_classes] = label_handler", "def __str__(self):\n return str(self.image) + \"\\n\" + str(self.label)", "def label_generator(predictions, processor, filename):\n # Hash predictions for always unique filename\n hashed = hashlib.sha1(predictions).hexdigest()\n\n # Get label from keras predictor\n label = processor(predictions, top=1)[0][0][1]\n\n # Capture original image suffix\n suffix = \"\".join(Path(filename).suffixes)\n\n new_label = f\"{label}_{hashed}{suffix}\"\n\n return new_label", "def test_labels(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n y = classes[np.random.randint(0, 5, 100)]\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = dict(zip(range(len(classes)), classes))\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = LabelEncoder().fit(y)\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)", "def get_imagenet_label(index):\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../resources/imagenet_class_index.json')) as f:\n _CLASS_INDEX = json.load(f)\n return _CLASS_INDEX[str(index)][1]", "def test_decode_labels_from_numeric(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n y = np.random.randint(0, 5, 100)\n decoded = classes[y]\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n encoder = dict(zip(range(len(classes)), classes))\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n encoder = LabelEncoder().fit(decoded)\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._decode_labels(y), decoded)", "def load_label(self, pr):\n return", "def add_label(self, label, name, label_type):\n assert label_type in ['label', 'prediction', 'guide'], \\\n \"{} not in ['label', 'prediction', 'guide']: Must select an acceptable type\".format(label_type)\n check_numpy_table(label, req_fields=('raw_start', 'raw_length', 'reference_index',\n 'kmer', 'posterior_probability'))\n\n # label.sort(order=['raw_start'], kind='mergesort')\n # check the labels are in the correct format\n assert min(label[\"raw_start\"]) >= 0, \"Raw start cannot be less than 0\"\n assert 0 <= max(label[\"posterior_probability\"]) <= 1, \\\n \"posterior_probability must be between zero and one {}\".format(row[\"posterior_probability\"])\n\n # make sure last label can actually index the signal correctly\n try:\n self.scaled_signal[label[-1][\"raw_start\"]:label[-1][\"raw_start\"] + label[-1][\"raw_length\"]]\n except IndexError:\n raise IndexError(\"labels are longer than signal\")\n\n label1 = np.sort(label, order=['raw_start'], kind='mergesort')\n\n # infer strand alignment of read\n if 
label1[0][\"reference_index\"] >= label1[-1][\"reference_index\"]:\n minus_strand = True\n else:\n minus_strand = False\n if self.minus_strand is not None:\n if label[0][\"raw_start\"] != label[-1][\"raw_start\"]:\n assert self.minus_strand == minus_strand, \"New label has different strand direction, check label\"\n else:\n self.minus_strand = minus_strand\n\n # set label with the specified name\n if label_type == 'label':\n self.label[name] = label\n elif label_type == 'prediction':\n self.prediction[name] = label\n elif label_type == 'guide':\n self.guide[name] = label", "def _label_loader(self, prefix):\n return self._base_loader(prefix, 'labels')", "def _classification(text_path_list, id_list, label_list):\n textnum = len(text_path_list)\n batched_num = ((textnum - 1) // classify.BATCH_SIZE + 1) * classify.BATCH_SIZE\n for i in range(batched_num - textnum):\n text_path_list.append(text_path_list[0])\n id_list.append(id_list[0])\n annotations = classify_obj.inference(text_path_list, id_list, label_list) #\n return annotations[0:textnum]", "def class_labels(self):\n return self._class_labels", "def test_decode_labels_unknown_class(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n y = classes[np.random.randint(0, 5, 100)]\n\n # Remove class \"c\" from the known array labels\n classes = np.array([\"a\", \"b\", \"d\", \"e\"])\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n with pytest.raises(ModelError, match=\"could not decode\"):\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n encoder = dict(zip(classes, range(len(classes))))\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n with pytest.raises(ModelError, match=\"cannot decode class 'c' to label\"):\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n encoder = LabelEncoder().fit(classes[np.random.randint(0, 4, 100)])\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n with pytest.raises(ModelError, match=\"could not decode\"):\n npt.assert_array_equal(oz._decode_labels(y), decoded)", "def build_label_transform():\n\n return RobustLabelEncoder(\n labels=['0'], fill_label_value='1', include_unseen_class=True\n )", "def get_train_labels(self, window, scene):\n pass", "def create_cat_tf_example(label, label_text, img_path, img_name):\n\t\n\twith tf.gfile.FastGFile(img_path + img_name, 'rb') as fid:\n\t encoded_image = fid.read() \n\n\tencoded_image_data = sess.run(resize_image, {encoded_jpg_ph: encoded_image}) # I think this may not be the right way of doing this\n\tb_filename = str.encode(img_name)\n\n\timage_format = b'jpg'\n\txmins = [10.0 / width]\n\txmaxs = [(width - 10) / width]\n\tymins = [10.0 / height]\n\tymaxs = [(height - 10.0) / height]\n\t# classes_text = [str.encode(label_text)]\n\tclasses_text = []\n\tif label_text:\n\t\tclasses_text.append(label_text.encode('utf8'))\n\tclasses = []\n\t# if label == 1:\n\tclasses.append(int(label))\n\t# print(classes_text, classes, b_filename)\n\ttf_example = tf.train.Example(features=tf.train.Features(feature={\n\t\t'image/height': dataset_util.int64_feature(height),\n\t\t'image/width': dataset_util.int64_feature(width),\n\t\t'image/filename': dataset_util.bytes_feature(b_filename),\n\t\t'image/source_id': dataset_util.bytes_feature(b_filename),\n\t\t'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n\t\t# 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n\t\t'image/format': dataset_util.bytes_feature(image_format),\n\t\t'image/object/bbox/xmin': 
dataset_util.float_list_feature(xmins),\n\t\t'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n\t\t'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n\t\t'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n\t\t'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n\t\t'image/object/class/label': dataset_util.int64_list_feature(classes),\n\t}))\n\treturn tf_example", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def label(d, X, ind_class0, ind_class1, N, V, binary):\n if binary == True:\n K = 1\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = 0.0\n C[ind_class1, :] = 1.0\n else:\n K = 2\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = torch.tensor([1.0, 0.0])\n C[ind_class1, :] = torch.tensor([0.0, 1.0])\n\n X_train = X[:N, :]\n X_val = X[N:, :]\n C_train = C[:N, :]\n C_val = C[N:, :]\n\n return [X_train, C_train, X_val, C_val, d, K]", "def get_labels(self):\r\n raise NotImplementedError()", "def encodeToCartBoxesLabels(self, gt_instances):\n raw_boxes_xywh = np.zeros((self.config_data[\"max_boxes_per_frame\"], 5))\n ### initialize gronud truth labels as np.zeros ###\n gt_labels = np.zeros(list(self.cart_shape[1:3]) + \\\n [len(self.anchor_boxes_cart)] + \\\n [len(self.config_data[\"all_classes\"]) + 5]) \n\n ### start transferring box to ground turth label format ###\n for i in range(len(gt_instances[\"classes\"])):\n if i > self.config_data[\"max_boxes_per_frame\"]:\n continue\n class_name = gt_instances[\"classes\"][i]\n box_xywh = gt_instances[\"cart_boxes\"][i]\n class_id = self.config_data[\"all_classes\"].index(class_name)\n if i <= self.config_data[\"max_boxes_per_frame\"]:\n raw_boxes_xywh[i, :4] = box_xywh\n raw_boxes_xywh[i, 4] = class_id\n class_onehot = helper.smoothOnehot(class_id, \\\n len(self.config_data[\"all_classes\"]))\n exist_positive = False\n grid_strid = self.cart_grid_strides\n anchors = self.anchor_boxes_cart\n box_xywh_scaled = box_xywh[np.newaxis, :].astype(np.float32)\n box_xywh_scaled[:, :2] /= grid_strid\n anchors_xywh = np.zeros([len(anchors), 4])\n anchors_xywh[:, :2] = np.floor(box_xywh_scaled[:, :2]) + 0.5\n anchors_xywh[:, 2:] = anchors.astype(np.float32)\n\n iou_scaled = helper.iou2d(box_xywh_scaled, anchors_xywh)\n ### NOTE: 0.3 is from YOLOv4, maybe this should be different here ###\n ### it means, as long as iou is over 0.3 with an anchor, the anchor\n ### should be taken into consideration as a ground truth label\n iou_mask = iou_scaled > 0.3\n\n if np.any(iou_mask):\n xind, yind = np.floor(np.squeeze(box_xywh_scaled)[:2]).astype(np.int32)\n ### TODO: consider changing the box to raw yolohead output format ###\n gt_labels[xind, yind, iou_mask, 0:4] = box_xywh\n gt_labels[xind, yind, iou_mask, 4:5] = 1.\n gt_labels[xind, yind, iou_mask, 5:] = class_onehot\n exist_positive = True\n\n if not exist_positive:\n ### NOTE: this is the normal one ###\n ### it means take the anchor box with maximum iou to the raw\n ### box as the ground truth label\n iou_mask = iou_scaled == iou_scaled.max()\n\n if np.any(iou_mask):\n xind, yind = np.floor(np.squeeze(box_xywh_scaled)[:2]).astype(np.int32)\n ### TODO: consider changing the box to raw yolohead output format ###\n gt_labels[xind, yind, iou_mask, 0:4] = box_xywh\n gt_labels[xind, yind, iou_mask, 4:5] = 1.\n gt_labels[xind, yind, iou_mask, 5:] = class_onehot\n\n has_label = False\n if gt_labels.max() != 0:\n has_label = True\n 
gt_labels = np.where(gt_labels == 0, 1e-16, gt_labels)\n return gt_labels, has_label, raw_boxes_xywh", "def inference_on_data(image) -> str:\n result = inference_model(image)\n class_label = torch.argmax(result[0])\n # Print to log acts as a proxy of saving to an actual DB\n print(f'Image Class : {class_label}')\n return str(class_label)", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [self.classes[np.argmax(elem)] for elem in self.data]\n return tags", "def label(self):\r\n raise NotImplementedError", "def create_metering_label(self, body=None):\r\n return self.post(self.metering_labels_path, body=body)", "def test_class_encoder_strings():\n\n unk_label = '<unk>'\n encoder = ClassEncoder(unk_label=unk_label)\n encoder.fit([\"paris\", \"paris\", \"tokyo\", \"amsterdam\", unk_label])\n\n result_transf = encoder.transform([\"tokyo\", \"tokyo\", \"paris\", unk_label])\n result_inv = encoder.inverse_transform([2, 2, 1, -1])\n\n assert np.array_equal(encoder.classes_,\n np.array(['amsterdam', 'paris', 'tokyo', unk_label]))\n assert encoder.classes_dict == {unk_label: -1, 'amsterdam': 0, 'paris': 1, 'tokyo': 2}\n assert np.array_equal(result_transf, np.array([2, 2, 1, -1]))\n assert np.array_equal(result_inv, np.array(['tokyo', 'tokyo', 'paris', unk_label]))", "def __init__(self, image_dir, instances_json, classes_file, image_size=(64, 64), mask_size=16,\n normalize_images=True, max_samples=None, min_object_size=0.01,\n min_objects_per_image=1, max_objects_per_image=8,\n include_other=False, instance_whitelist=None):\n super(Dataset, self).__init__()\n\n self.image_dir = image_dir\n self.mask_size = mask_size\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.set_image_size(image_size)\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n self.classes = []\n annotations = list(paths.list_files(os.path.join(instances_json), validExts=(\".xml\")))\n\n # with open(instances_json, 'r') as f:\n # instances_data = json.load(f)\n\n self.image_ids = []\n self.image_id_to_filename = {}\n self.image_id_to_size = {}\n new_image_ids = []\n self.image_id_to_objects = defaultdict(list)\n\n for j, ann in enumerate(annotations):\n\n tree = ET.parse(ann)\n anno_xml = tree.getroot()\n # anno_json = open(ann, 'r')\n # image_id = anno_xml.find('path').text\n image_id = j\n filename = anno_xml.find('filename').text\n size = anno_xml.findall('size')[0]\n width = size.find('width').text\n height = size.find('height').text\n self.image_ids.append(image_id)\n self.image_id_to_filename[image_id] = filename\n self.image_id_to_size[image_id] = (width, height)\n\n cls = open(classes_file, 'r')\n\n object_idx_to_name = {}\n all_instance_categories = []\n for i, category_data in enumerate(cls):\n category_id = i\n category_name = category_data\n all_instance_categories.append(str(category_name[:-1]))\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n\n if instance_whitelist is None:\n instance_whitelist = all_instance_categories\n category_whitelist = set(instance_whitelist)\n\n for object_data in anno_xml.findall('object'):\n bndbox = object_data.findall('bndbox')[0]\n xmin = bndbox.find('xmin').text\n ymin = bndbox.find('ymin').text\n xmax = bndbox.find('xmax').text\n ymax = bndbox.find('ymax').text\n w = int(xmax) - int(xmin)\n h = int(ymax) - int(ymin)\n # _, _, w, h = object_data['bndbox']\n # 
Esto no se que es lo que hace exactamente\n W, H = self.image_id_to_size[image_id]\n W = int(W)\n H = int(H)\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_data.find('name').text\n\n if object_name not in self.classes:\n self.classes.append(object_name)\n object_data.find('name').set(\"id\", str(self.classes.index(object_name)))\n # object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n name_to_idx = self.vocab['object_name_to_idx']\n # assert len(name_to_idx) == len(set(name_to_idx.values()))\n max_object_idx = max(name_to_idx.values())\n idx_to_name = ['NONE'] * (1 + max_object_idx)\n for name, idx in self.vocab['object_name_to_idx'].items():\n idx_to_name[idx] = name\n self.vocab['object_idx_to_name'] = idx_to_name\n self.num_objects = len(self.vocab['object_idx_to_name'])\n\n # Prune images that have too few or too many objects\n total_objs = 0\n for image_id in self.image_ids:\n # Hay que comprobar o cambiar esto a un id numerico por que al ser string no puede usarse como clave o asi para esto y da error. Investigar que se puede hacer con esto\n num_objs = len(self.image_id_to_objects[image_id])\n total_objs += num_objs\n if min_objects_per_image <= num_objs <= max_objects_per_image:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n self.vocab['pred_idx_to_name'] = [\n '__in_image__',\n 'left of',\n 'right of',\n 'above',\n 'below',\n 'inside',\n 'surrounding',\n ]\n self.vocab['pred_name_to_idx'] = {}\n for idx, name in enumerate(self.vocab['pred_idx_to_name']):\n self.vocab['pred_name_to_idx'][name] = idx", "def __repr__(self):\n\n return \"<Image image_id=%s label=%s>\" % (self.id, self.image_label)", "def test_decode_labels_from_strings(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n decoded = classes[np.random.randint(0, 5, 100)]\n y = np.array([v.upper() for v in decoded])\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n encoder = {c.upper(): c for c in classes}\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n def inverse_transform(self, y):\n return np.array([yi.lower() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=L2UTransformer())\n npt.assert_array_equal(oz._decode_labels(y), decoded)", "def _convert_raw_example(\n self,\n mode_dict: MutableMapping[str, Any],\n example: Mapping[str, Any]) -> ProcessedExample:\n img_path = example['image_path_or_name']\n base_name = os.path.basename(img_path)\n img_fobj = example.get('image_fobj', tf.io.gfile.GFile(img_path, 'rb'))\n img_bytes, img_shape = image_utils.image_to_jpeg(fobj=img_fobj,\n filename=base_name)\n\n img_format = 'JPEG'\n key = hashlib.sha256(img_bytes.read()).hexdigest()\n img_bytes.seek(0)\n\n bboxes = example['bbox_info']\n processed_bboxes = []\n\n img_height = img_shape[0]\n img_width = img_shape[1]\n\n img_id = 
example.get('image_id', self._get_id('image'))\n mode_dict['images'].append({\n 'id': img_id,\n 'width': img_width,\n 'height': img_height,\n })\n\n for bbox_info in bboxes:\n annotations_bbox = bbox_info['bbox']\n bbox = bbox_utils.BBox(bbox=annotations_bbox,\n fmt=self.builder_config.bbox_format,\n img_width=img_width,\n img_height=img_height)\n label = bbox_info['label']\n if isinstance(label, int):\n text = str(label)\n elif isinstance(label, six.string_types):\n text = label\n label = bbox_info.get('label_id', self._get_label_id(text))\n else:\n raise TypeError(\n 'The provided label was not a string or int. Got: {}'.format(\n type(label)))\n\n if label >= self.builder_config.num_labels:\n raise ValueError('Provided label {} for {} is greater than '\n 'the number of classes specified. num_classes: '\n '{}'.format(label,\n base_name,\n self.builder_config.num_labels))\n\n annotation_id = example.get('annotation_id', self._get_id('annotation'))\n bbox.convert(bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX)\n xmin, xmax, ymin, ymax = bbox.as_tuple()\n bbox = bbox.convert(bbox_utils.BBoxFormat.WIDTH_HEIGHT)\n mode_dict['annotations'].append({\n 'id': annotation_id,\n 'image_id': img_id,\n 'category_id': label,\n 'bbox': annotations_bbox,\n })\n\n processed_bboxes.append({\n 'bbox': tfds.features.BBox(ymin=ymin,\n xmin=xmin,\n ymax=ymax,\n xmax=xmax),\n 'class': {\n 'text': text,\n 'label': label,\n }\n })\n\n return img_id, {\n 'image': {\n 'height': img_width,\n 'width': img_shape[1],\n 'filename': img_path,\n 'source_id': img_id,\n 'encoded': img_bytes,\n 'format': img_format,\n 'key': {\n 'sha256': key,\n },\n 'object': processed_bboxes,\n }\n }", "def encode_labels(labels, nclass=5):\n y = np.zeros((len(labels), nclass)).astype('float32')\n for j, yj in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(yj) + 1:\n y[j, i] = yj - np.floor(yj)\n if i+1 == np.floor(yj):\n y[j, i] = np.floor(yj) - yj + 1\n return y", "def getLabelEncoder():\n classes = list(string.letters + string.digits)\n classes.append('')\n le = LabelEncoder()\n le.fit(classes)\n\n return le", "def get_train_labels(self, window: Box, scene: Scene) -> Labels:\n raise NotImplementedError()", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def get_classification(self, image):\n # return TrafficLight.RED\n # TODO implement light color prediction\n # creating an image object \n img_np = np.array(image) \n\n # convert np array to tensor\n input_tensor = tf.convert_to_tensor(img_np)\n\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis, ...]\n\n\n detections = self.loaded(input_tensor)\n\n num_detections = int(detections.pop('num_detections'))\n\n # detection_classes should be ints.\n detections_dict = {key: value[0, :num_detections].numpy() for key, value in detections.items()}\n\n\n # detection_classes should be ints.\n detections_dict['detection_classes'] = detections_dict['detection_classes'].astype(np.int64)\n\n label_id_offset = 1\n\n # DEBUG - can do it in a cleaner way :0\n tl_classes = {3: 'green', 2: 'red'}\n top_classes_prediction = list(detections_dict['detection_classes']+label_id_offset)[:5] \n #print(top_classes_prediction)\n for i in range(len(top_classes_prediction)):\n if top_classes_prediction[i] == 2:\n top_classes_prediction[i] = 'green'\n elif top_classes_prediction[i] 
== 3:\n top_classes_prediction[i] = 'red'\n\n\n #print(\"--------->\", image_path, \"<-----------\")\n #print( top_classes_prediction ) \n #print(detections_dict['detection_scores'][:5], '\\n' )\n\n # basic red tl logic\n if top_classes_prediction[0] == 'red' and detections_dict['detection_scores'][0] >= 0.60:\n #print(\"-------------> RED TRAFFIC LIGHT <----------------\\n\")\n self.current_light = TrafficLight.RED\n #rospy.logwarn( \"----------------- Taffic light is RED !!! -------------------- \" )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n else:\n #print(\"No red traffic is detected\\n\")\n self.current_light = TrafficLight.GREEN\n #rospy.logwarn( \"----------------- You're good to go !!! --------: {0} - {1} \".format(top_classes_prediction[0], detections_dict['detection_scores'][0]) )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n\n return self.current_light", "def encode_labels(labels, nclass=5):\n Y = np.zeros((len(labels), nclass)).astype('float32')\n for j, y in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(y) + 1:\n Y[j,i] = y - np.floor(y)\n if i+1 == np.floor(y):\n Y[j,i] = np.floor(y) - y + 1\n return Y", "def create_labels(filename, class_indices):\n \n _logger.debug(\"Mapping labels\")\n label={}\n label['category']=[]\n for key in class_indices:\n label['category'].append({\n 'name' : key,\n 'index' : class_indices[key]\n })\n label_path = os.path.join(config.TRAINED_MODELS_DATA, filename)\n with open(os.path.join(label_path, 'labels.txt'), 'w') as outfile:\n json.dump(label, outfile)\n return label_path", "def prepare_label(self, input_batch, new_size):\n with tf.name_scope('label_encode'):\n input_batch = tf.image.resize_nearest_neighbor(input_batch,\n new_size) # As labels are integer numbers, need to use NN interp.\n input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.\n input_batch = tf.one_hot(input_batch, depth=self.n_classes)\n return input_batch", "def label_block_serializer(batch):\n serializer = sl_mpls_pb2.SLMplsLabelBlockMsg()\n if 'blocks' in batch:\n blk_list=[]\n for block in batch['blocks']:\n b = sl_mpls_pb2.SLMplsLabelBlockKey()\n if 'block_size' in block:\n b.LabelBlockSize = block['block_size']\n if 'start_label' in block:\n b.StartLabel = block['start_label']\n blk_list.append(b)\n serializer.MplsBlocks.extend(blk_list)\n return serializer", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! 
Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def transform_with_label(aug):\n\n geometric_tfx = get_geometric_transformer(aug)\n intensity_tfx = get_intensity_transformer(aug)\n\n def transform(comp, c_label, c_img, use_onehot, nclass, **kwargs):\n \"\"\"\n Args\n comp: a numpy array with shape [H x W x C + c_label]\n c_label: number of channels for a compact label. Note that the current version only supports 1 slice (H x W x 1)\n nc_onehot: -1 for not using one-hot representation of mask. 
otherwise, specify number of classes in the label\n\n \"\"\"\n comp = copy.deepcopy(comp)\n if (use_onehot is True) and (c_label != 1):\n raise NotImplementedError(\"Only allow compact label, also the label can only be 2d\")\n assert c_img + 1 == comp.shape[-1], \"only allow single slice 2D label\"\n\n # geometric transform\n _label = comp[..., c_img ]\n _h_label = np.float32(np.arange( nclass ) == (_label[..., None]) )\n comp = np.concatenate( [comp[..., :c_img ], _h_label], -1 )\n comp = geometric_tfx(comp)\n # round one_hot labels to 0 or 1\n t_label_h = comp[..., c_img : ]\n t_label_h = np.rint(t_label_h)\n assert t_label_h.max() <= 1\n t_img = comp[..., 0 : c_img ]\n\n # intensity transform\n t_img = intensity_tfx(t_img)\n\n if use_onehot is True:\n t_label = t_label_h\n else:\n t_label = np.expand_dims(np.argmax(t_label_h, axis = -1), -1)\n return t_img, t_label\n\n return transform", "def set_gt_label(\n self, value: Union[np.ndarray, torch.Tensor, Sequence[Number], Number]\n ) -> 'DataSample':\n label = format_label(value, self.get('num_classes'))\n if 'gt_label' in self:\n self.gt_label.label = label.label\n else:\n self.gt_label = label\n return self", "def new_label(self, context, payload):\n\n labels = GmailActions.labels(context)['labels']\n label_id = \"\"\n\n for label in labels:\n if label['name'] == payload['name']:\n label_id = label['id']\n break\n\n access_token = util.get_access_token(context['headers'])\n url = util.get_url(context) + f\"labels/{label_id}\"\n response = util.rest(\"GET\", url, access_token)\n\n if response.status_code > 400:\n raise Exception(\"Error \", response.text)\n\n return json.loads(response.text)", "def create_tf_example(group, path, label_map):\n\n #load image and extract attributes (width, height, filename)\n with tf.gfile.GFile(os.path.join(path, \"{}\".format(group.filename)), \"rb\") as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n\n filename = group.filename.encode(\"utf8\")\n image_format = b\"jpg\"\n \n #tf.train.Example() expects several objects in lists\n xmins = []\n xmaxs = []\n ymins = []\n ymaxs = []\n classes_text = []\n classes = []\n\n for index, row in group.object.iterrows():\n #Extract bounding box\n xmins.append(row[\"xmin\"] / width)\n xmaxs.append(row[\"xmax\"] / width)\n ymins.append(row[\"ymin\"] / height)\n ymaxs.append(row[\"ymax\"] / height)\n\n #Extract class name and retrieve class id\n #classes_text.append(row[\"class\"].encode(\"utf8\"))\n class_index = label_map.get(str(row[\"class\"]))\n \n #Check if class id could be retrieved\n assert (\n class_index is not None\n ), \"class label: `{}` not found in label_map: {}\".format(\n row[\"class\"], label_map\n )\n\n #For troubleshooting only\n print(f\"{filename} has class_index {class_index} and class {row['class']}\")\n\n classes.append(class_index)\n\n #Build tf_example object\n tf_example = tf.train.Example(\n features=tf.train.Features(\n feature={\n \"image/height\": dataset_util.int64_feature(height),\n \"image/width\": dataset_util.int64_feature(width),\n \"image/filename\": dataset_util.bytes_feature(filename),\n \"image/source_id\": dataset_util.bytes_feature(filename),\n \"image/encoded\": dataset_util.bytes_feature(encoded_jpg),\n \"image/format\": dataset_util.bytes_feature(image_format),\n \"image/object/bbox/xmin\": dataset_util.float_list_feature(xmins),\n \"image/object/bbox/xmax\": dataset_util.float_list_feature(xmaxs),\n \"image/object/bbox/ymin\": 
dataset_util.float_list_feature(ymins),\n \"image/object/bbox/ymax\": dataset_util.float_list_feature(ymaxs),\n \"image/object/class/text\": dataset_util.bytes_list_feature(\n classes_text\n ),\n \"image/object/class/label\": dataset_util.int64_list_feature(classes),\n }\n )\n )\n return tf_example", "def convert_to_frcnn(label: [], image_path: str, **_) -> []:\n\n # Get the label data in the dataset format\n unicode, xmin, ymin, abs_bb_width, abs_bb_height = label.split()\n\n # Make sure the class is a string\n class_name = str(unicode)\n\n # Cast each data to int\n xmin = int(xmin)\n ymin = int(ymin)\n abs_bb_width = int(abs_bb_width)\n abs_bb_height = int(abs_bb_height)\n\n # Calculate the top-right coordinates of the bounding box\n xmax = xmin + abs_bb_width\n ymax = ymin + abs_bb_height\n\n return [to_file_name(image_path, 'jpg'),\n xmin,\n ymin,\n xmax,\n ymax,\n class_name]", "def create_labelmapDict_patch(list_all_images, path_dataset):\n list_all_classes = []\n for idx, name_image_ in enumerate(list_all_images):\n _, tail = os.path.split(name_image_)\n temp_obj = []\n name_file_xml_all = os.path.join(path_dataset, 'LABELS', tail[0:-3] + 'xml')\n if os.path.exists(name_file_xml_all):\n with tf.gfile.GFile(name_file_xml_all, 'rb') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n data = tfrecord_util.recursive_parse_xml_to_dict(xml)['annotation']\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ != 'INCOMPLETAS':\n list_all_classes.append(name_in_obj_)\n temp_obj.append(obj)\n # list_all_classes = unique_list(list_all_classes)\n list_all_classes = list(set(list_all_classes))\n list_all_classes.sort()\n list_all_classes.insert(0, 'background')\n labelmap_ = {el: k for k, el in enumerate(list_all_classes)}\n return labelmap_", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [[self.classes[np.argmax(subrow)] for subrow in row] for row in self.data]\n return tags", "def mix_labellers(labellers, class_name=\"MixtureLabeller\"):\n return type(class_name, labellers, {})", "def get_labels_from_annotation_batch(annotation_batch_tensor, class_labels):\n \n batch_labels = tf.map_fn(fn=lambda x: get_labels_from_annotation(annotation_tensor=x, class_labels=class_labels),\n elems=annotation_batch_tensor,\n dtype=tf.float32)\n \n return batch_labels", "def display_label(f_class, catalog): \n # Transform the top n class indexes into class labels LIST.\n return catalog[str(f_class)]", "def dump(self, img_labels, images_base_directory, destination_pickle_path, destination_pickle_file_name, preprocessing_transformer):\n # Load images_base_directory\n # files_per_pickle = len(img_labels) // parts\n # pickle_part_num = 1\n result = None\n images = []\n for i in range(len(img_labels)):\n file_name = os.path.join(images_base_directory, img_labels.iloc[i,0]) + '.npy'\n image = np.load(file_name)\n h, w = image.shape\n image = torch.from_numpy(image).reshape(1, h, w)\n image = image.float()\n\n # apply preprocessing\n image = preprocessing_transformer(image)\n images.append(image)\n result = {\n \"images\": images\n }\n\n # Save final remaining parts\n self._save_part(destination_pickle_path, destination_pickle_file_name, result)", "def __init__(self, root, which_set, vocab, transform=None):\n self.root = root\n self.img_root = os.path.join(root, 'Img')\n self.ann = json.load(open(os.path.join(root, 
'{}_labels.json'.format(which_set)),'r'))\n\n self.vocab = vocab\n self.transform = transform\n self.img_list = list(self.ann.keys())\n # transfer categories id to labels\n self.cat2label = {}\n for i, k in enumerate(label_corpus):\n self.cat2label[k] = i\n\n self.num_cats = len(self.cat2label) \n\n # vgnome has varied number of annotations [1, 20], average 5.73\n # we still choose five as the parameter. It can be adjusted later on\n self.num_ann_onebatch = 5\n self.ids = [a for a in range(len(self.ann))]\n\n print('\\t {} train samples from {} set'.format(len(self.ids), which_set ))\n print('\\t {} of categories'.format(self.num_cats))", "def label_to_class_name(label):\n try:\n genre_label = pd.read_csv(path.join(DATA_PATH, 'genre_labels.csv'))\n return genre_label[genre_label['label'] == int(label)]['genre'].values[\n 0]\n except IOError:\n return label", "def output_classLabel_to_txt(save_path):\n file_obj = open(save_path,'w')\n length = len(class_label)\n for i in range(0,length):\n line = '%d:%s'%(i,class_label[i])\n file_obj.writelines(line+'\\n')\n return True", "def _generate_elements(example, label):\n\n class_label = None\n parsed = tf.train.Example.FromString(example.numpy())\n if parsed.features.feature[label].int64_list.value:\n val = parsed.features.feature[label].int64_list.value\n if len(val) > 0:\n class_label = val[0]\n else:\n val = parsed.features.feature[label].bytes_list.value\n if len(val) > 0:\n class_label = val[0].decode()\n return (class_label, parsed)", "def __init__(self):\n self.classes_to_detect = ['person']\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Load Tensorflow model into memory\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(GRAPH_PATH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with self.detection_graph.as_default():\n self.sess = tf.Session(graph=self.detection_graph, config=tf_config)\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n # Each box represents a part of the image where a particular\n # object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each of\n # the objects. 
Score is shown on the result image, together\n # with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name(\n 'num_detections:0')\n\n logger.info('Model graph loaded.')", "def on_save_label(self, image_id, label_id):\n logger.info(f\"New label saved for: {image_id} => {label_id}\")", "def get_train_labels(self):\n raise NotImplementedError", "def build_label_transform():\n\n return NALabelEncoder()", "def to_label(self):\n return self.label", "def serialize(self):", "def classify(self):\n infer = self.model.signatures['serving_default']\n for i, original_image in enumerate(self.images):\n image = original_image.copy()\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image = cv.resize(image, (self.image_size, self.image_size))\n image = image / 255.\n\n image = [image]\n image = np.asarray(image).astype(np.float32)\n batch_data = tf.constant(image)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=10,\n max_total_size=10,\n iou_threshold=FLAGS.iou,\n score_threshold=FLAGS.score\n )\n\n height, width, _ = original_image.shape\n\n print(scores)\n classes = classes[0]\n print(classes)\n\n bbox = boxes[0][0].numpy()\n bbox[0] = int(bbox[0] * height)\n bbox[2] = int(bbox[2] * height)\n bbox[1] = int(bbox[1] * width)\n bbox[3] = int(bbox[3] * width)\n\n if BIRD_CLASS in classes:\n idx = np.where(classes == BIRD_CLASS)\n bbox = bbox.astype(np.int)\n x = int((bbox[1] + bbox[3]) / 2)\n y = int((bbox[0] + bbox[2]) / 2)\n self.thumbnail_center.append((x, y))\n cropped_img = original_image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n self.bird_images.append(cropped_img)\n self.confidence_arr.append(scores[idx[0][0]][0])\n\n self.generate_thumbnail(size=150)", "def label(self):\n return self._label_shape", "def encode(self, labels: List[str], device: Optional[torch.device] = None) -> Tensor:\n raise NotImplementedError", "def serialize_example(image_inp_string,image_out_string):\n image_inp_shape = tf.image.decode_jpeg(image_inp_string).shape\n image_out_shape = tf.image.decode_jpeg(image_out_string).shape\n feature = {\n\n 'image_input': _bytes_feature(image_inp_string),\n 'image_output':_bytes_feature(image_out_string),\n }\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n\n #--------------------------------------------------------------------------------------\n\n ###process image", "def get_label(self, uuid):\n return Label.deserialize(self._get_single('labels', {'uuid': uuid}))", "def write_label_file(labels_to_class_names, dataset_dir, filename='labels.txt'):\n labels_filename = os.path.join(dataset_dir, filename)\n with tf.gfile.Open(labels_filename, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))", "def GetClassification(self, *args, **kwargs):\n pass", "def __repr__(self):\n return \"<label: %s, input: %s>\" % (self.label,\n super(LabeledExample, self).__repr__())", "def __init__(self):\n 
self.image_subscriber = rospy.Subscriber('/raspicam_node/image/compressed', CompressedImage, self.imageCallback)\n print 'Waiting for classifier service to come up...'\n rospy.wait_for_service('/classifier_node/classify')\n self.classify_client = rospy.ServiceProxy('/classifier_node/classify', Classify)", "def keras_inference(input_image, model_type, labels, return_image):\r\n # Loading the image\r\n img = image.load_img(input_image, target_size=(50, 50))\r\n # Converting the image to numpy array\r\n x = image.img_to_array(img) \r\n # convert 3D tensor to 4D tensor with shape (1, 512, 512, 3)\r\n x = np.expand_dims(x, axis=0)\r\n\r\n image_to_predict = x.astype('float32')/255\r\n \r\n # image_to_plot = path_to_tensor(input_image)\r\n\r\n # model's weight for localization\r\n model = load_model(model_type)\r\n prediction = model.predict(image_to_predict)\r\n # print(\"X shape : \", x.shape)\r\n # prediction_final = \"Not_cancer: \" + str(np.round(prediction[0][0]*100, decimals = 2)) + \"%\" + \\\r\n # \" | Cancer: \" + str(np.round(prediction[0][1]*100, decimals = 2)) + \"%\"\r\n print(\"Prediction : \",prediction[0])\r\n print(\"Argmax : \", np.argmax(prediction[0]))\r\n confidence = np.max(prediction[0]) * 100\r\n classify = labeled_class[int(np.argmax(prediction[0]))]\r\n print(\"classify :\", classify)\r\n output = {\r\n \"label\": \"{}\".format(task),\r\n \"type\" : \"classification\",\r\n \"output\" : {\r\n \"confidence\" : \"{0:.2f}\".format(round(confidence,2)),\r\n \"results\" : classify,\r\n \"image\" : return_image\r\n }\r\n } \r\n \r\n return output", "def draw_label_on_image(root_folder_path,root_folder_name,img_name,img_type,class_name,bb_color,bb_list):\n img_path=os.path.join(root_folder_path,root_folder_name,img_type,img_name+\".png\")\n img=cv2.imread(img_path)\n for each_bb in bb_list:\n cv2.rectangle(img,(each_bb[0],each_bb[2]),(each_bb[1],each_bb[3]),bb_color,3)\n cv2.putText(img,class_name,(each_bb[0],each_bb[3]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2,cv2.LINE_AA)\n cv2.imwrite(img_path,img)" ]
[ "0.57493436", "0.56853515", "0.5624522", "0.55395037", "0.55254596", "0.550018", "0.54977113", "0.5459444", "0.54317695", "0.5422993", "0.54141396", "0.5414098", "0.54095036", "0.5399831", "0.53884953", "0.53860307", "0.5378658", "0.5374597", "0.5360391", "0.5358538", "0.5353207", "0.5338585", "0.53174466", "0.53094065", "0.529904", "0.5295931", "0.52927417", "0.5287916", "0.5287003", "0.5280808", "0.52744484", "0.52732205", "0.52604514", "0.5243403", "0.524047", "0.5227504", "0.5212903", "0.52060825", "0.52015865", "0.51795834", "0.51783407", "0.51663536", "0.5159875", "0.5154223", "0.5152665", "0.5152665", "0.5152665", "0.51422256", "0.5133754", "0.51334214", "0.51327163", "0.51279235", "0.5126876", "0.51239365", "0.51153725", "0.5109189", "0.50919807", "0.5089785", "0.50816554", "0.50804603", "0.50780666", "0.50708085", "0.50685495", "0.5065005", "0.5059387", "0.50552076", "0.5053178", "0.5053162", "0.5053155", "0.5052998", "0.5050066", "0.50492823", "0.50490344", "0.50489813", "0.5048293", "0.50416404", "0.503574", "0.5031702", "0.5031644", "0.5027476", "0.50270665", "0.5020357", "0.50171536", "0.5016534", "0.5015029", "0.50142294", "0.50125134", "0.5005093", "0.500502", "0.50014776", "0.49977714", "0.49964607", "0.4993485", "0.49889785", "0.4986835", "0.4981063", "0.4977091", "0.49766314", "0.49708968", "0.49704462", "0.4970422" ]
0.0
-1
Deserializer used by the Consumer Service to parse images sent to the model
def kafka_deserializer(data): return pickle.loads(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deserialize(self, data):", "def _post_deserialize (self):\n pass", "def getDeserializer():", "def deserialize(self, blob):\n pass", "def deserialise_image(data):\n if \"data:image\" in data:\n data = data[data.find(\",\") + 1:]\n\n return Image.open(io.BytesIO(base64.urlsafe_b64decode(data)))", "def decode(self, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def deserialize_image(self, data, give_file_name):\r\n # Generate a random 8-character name\r\n # name = \"img_\" + self.generate_random_name() + \".png\"\r\n name = give_file_name + \".png\"\r\n file_path = os.path.join(self.temp_dir, name)\r\n img = Image.frombytes(data['mode'], data['size'], data['pixels'])\r\n img.save(file_path)\r\n return file_path", "def img(self):\n return self.img_decode(self.img_msg_)", "def deserialize(self, str):\n try:\n if self.image is None:\n self.image = autonavigation.msg.Image()\n end = 0\n _x = self\n start = end\n end += 29\n (_x.unique_key, _x.gps_week, _x.gps_millisecond, _x.video_id, _x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _struct_2IQB3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 212\n (_x.image.localPose.time, _x.image.localPose.dr_x, _x.image.localPose.dr_y, _x.image.localPose.dr_z, _x.image.localPose.dr_heading, _x.image.localPose.dr_roll, _x.image.localPose.dr_pitch, _x.image.localPose.lf_speed, _x.image.localPose.rf_speed, _x.image.localPose.lr_speed, _x.image.localPose.rr_speed, _x.image.localPose.rot_x, _x.image.localPose.rot_y, _x.image.localPose.rot_z, _x.image.localPose.acc_x, _x.image.localPose.acc_y, _x.image.localPose.acc_z, _x.image.localPose.batteryState, _x.image.localPose.batteryEnergy, _x.image.localPose.steer, _x.image.localPose.brake, _x.image.localPose.fuel, _x.image.localPose.trans, _x.image.localPose.VehicleState, _x.image.localPose.mode, _x.image.localPose.drStatus, _x.image.localPose.errorStatus, _x.image.localPose.emergency_flag, _x.image.localPose.hardswitch_on, _x.image.gpsPos.gps_flag, _x.image.gpsPos.gps_week, _x.image.gpsPos.gps_millisecond, _x.image.gpsPos.longitude, _x.image.gpsPos.laltitude, _x.image.gpsPos.gaussX, _x.image.gpsPos.gaussY, _x.image.gpsPos.height, _x.image.gpsPos.pitch, _x.image.gpsPos.roll, _x.image.gpsPos.azimuth, _x.image.gpsPos.northVelocity, _x.image.gpsPos.eastVelocity, _x.image.gpsPos.upVelocity, _x.image.gpsPos.positionStatus, _x.image.gpsPos.rot_x, _x.image.gpsPos.rot_y, _x.image.gpsPos.rot_z, _x.image.gpsPos.acc_x, _x.image.gpsPos.acc_y, _x.image.gpsPos.acc_z, _x.image.height, _x.image.width,) = _struct_d21i7bBI6d13i2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _struct_BI.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decode(self, imgObj):\r\n if not _checkIsStringIO(imgObj):\r\n raise TypeError('Given object is not a StringIO 
instance.')\r\n\r\n # Checking of image according to django.forms.fields.ImageField\r\n try:\r\n imgObj.seek(0)\r\n img = Image.open(imgObj)\r\n img.verify()\r\n except:\r\n raise ValueError('Content of given image could not be verified.')\r\n\r\n imgObj.seek(0)\r\n img = Image.open(imgObj)\r\n img.load()\r\n\r\n # Everything ok, convert PIL.Image to ROS and return it\r\n if img.mode == 'P':\r\n img = img.convert('RGB')\r\n\r\n rosimage = sensor_msgs.msg.Image()\r\n rosimage.encoding = ImageConverter._ENCODINGMAP_PY_TO_ROS[img.mode]\r\n (rosimage.width, rosimage.height) = img.size\r\n rosimage.step = (ImageConverter._PIL_MODE_CHANNELS[img.mode]\r\n * rosimage.width)\r\n rosimage.data = img.tostring()\r\n return rosimage", "def process_image(self):\n pass", "def from_dict(cls, dikt) -> \"ImageUpload\":\n return util.deserialize_model(dikt, cls)", "def deserialize(self, str):\n try:\n if self.objects is None:\n self.objects = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.objects = []\n for i in range(0, length):\n val1 = vision_msgs.msg.ClassifiedObject()\n _v4 = val1.header\n start = end\n end += 4\n (_v4.seq,) = _struct_I.unpack(str[start:end])\n _v5 = _v4.stamp\n _x = _v5\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v4.frame_id = str[start:end].decode('utf-8')\n else:\n _v4.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object_class = str[start:end].decode('utf-8')\n else:\n val1.object_class = str[start:end]\n start = end\n end += 4\n (val1.confidence,) = _struct_f.unpack(str[start:end])\n _v6 = val1.roi\n _x = _v6\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _struct_4IB.unpack(str[start:end])\n _v6.do_rectify = bool(_v6.do_rectify)\n self.objects.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _deserialize(self, handle):\n raise NotImplementedError", "def to_internal_value(self, data):\n if isinstance(data, str) and data.startswith('data:image'):\n # Found image is encoded, and must be decoded\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1] # Extract file extension\n id = uuid.uuid4()\n data = ContentFile(base64.b64decode(imgstr), name = id.urn[9:] + '.' + ext)\n return super(Base64ImageField, self).to_internal_value(data)", "def deserialize(self, obj):\n raise NotImplementedError", "def to_python(self, data):\n\n min_width, min_height, formats, max_size = config['MIN_WIDTH'], config['MIN_HEIGHT'], config['FORMATS'], \\\n config['IMAGE_MAX_SIZE']\n\n # While a lot of validation will only be carried out in-depth on the backend,\n # due to the difficulty of writing it, this size validation will be on the\n # frontend as well. This is because allowing somebody to upload a\n # large file just to get it kicked back would be a huge UX degradation\n # and also a bandwidth hog. 
This size validation will be accompanied by nginx\n # giving users who try to upload a truly massive file a much ruder experience\n # (dropped connection) to prevent huge server load on our end\n if data.size > max_size:\n # Translators: Error message for people who try to bypass image upload restrictions\n raise serializers.ValidationError(detail=_('Image is too large'))\n\n file = super(RestrictedDjangoImageField, self).to_python(data)\n\n width, height = file.image.size\n\n if width < min_width or height < min_height:\n # Translators: Error message when image is too small\n raise serializers.ValidationError(detail=_('Image does not meet '\n 'minimum width and height'\n ' requirements'))\n format = file.image.format\n\n if format not in formats:\n # Translators: Error message when image is not one of the allowed formats\n raise serializers.ValidationError(detail=_('Image does not meet '\n 'formatting requirements'))\n\n return file", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.graspable_objects is None:\n self.graspable_objects = None\n if self.image is None:\n self.image = sensor_msgs.msg.Image()\n if self.camera_info is None:\n self.camera_info = sensor_msgs.msg.CameraInfo()\n if self.meshes is None:\n self.meshes = None\n if self.reference_to_camera is None:\n self.reference_to_camera = geometry_msgs.msg.Pose()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.graspable_objects = []\n for i in range(0, length):\n val1 = manipulation_msgs.msg.GraspableObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.reference_frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.reference_frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.potential_models = []\n for i in range(0, length):\n val2 = household_objects_database_msgs.msg.DatabaseModelPose()\n start = end\n end += 4\n (val2.model_id,) = _get_struct_i().unpack(str[start:end])\n _v32 = val2.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.key = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.db = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.db = str[start:end]\n _v33 = val2.pose\n _v34 = _v33.header\n start = end\n end += 4\n (_v34.seq,) = _get_struct_I().unpack(str[start:end])\n _v35 = _v34.stamp\n _x = _v35\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v34.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v34.frame_id = str[start:end]\n _v36 = _v33.pose\n _v37 = _v36.position\n _x = _v37\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v38 = _v36.orientation\n _x = _v38\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.detector_name = str[start:end].decode('utf-8', 'rosmsg')\n 
else:\n val2.detector_name = str[start:end]\n val1.potential_models.append(val2)\n _v39 = val1.cluster\n _v40 = _v39.header\n start = end\n end += 4\n (_v40.seq,) = _get_struct_I().unpack(str[start:end])\n _v41 = _v40.stamp\n _x = _v41\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v40.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v40.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.points = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point32()\n _x = val3\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])\n _v39.points.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.channels = []\n for i in range(0, length):\n val3 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val3.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n val3.values = s.unpack(str[start:end])\n _v39.channels.append(val3)\n _v42 = val1.region\n _v43 = _v42.cloud\n _v44 = _v43.header\n start = end\n end += 4\n (_v44.seq,) = _get_struct_I().unpack(str[start:end])\n _v45 = _v44.stamp\n _x = _v45\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v44.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v44.frame_id = str[start:end]\n _x = _v43\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v43.fields = []\n for i in range(0, length):\n val4 = sensor_msgs.msg.PointField()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val4.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val4.name = str[start:end]\n _x = val4\n start = end\n end += 9\n (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])\n _v43.fields.append(val4)\n _x = _v43\n start = end\n end += 9\n (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])\n _v43.is_bigendian = bool(_v43.is_bigendian)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v43.data = str[start:end]\n start = end\n end += 1\n (_v43.is_dense,) = _get_struct_B().unpack(str[start:end])\n _v43.is_dense = bool(_v43.is_dense)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v42.mask = s.unpack(str[start:end])\n _v46 = _v42.image\n _v47 = _v46.header\n start = end\n end += 4\n (_v47.seq,) = _get_struct_I().unpack(str[start:end])\n _v48 = _v47.stamp\n _x = _v48\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v47.frame_id = str[start:end].decode('utf-8', 
'rosmsg')\n else:\n _v47.frame_id = str[start:end]\n _x = _v46\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v46.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v46.encoding = str[start:end]\n _x = _v46\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v46.data = str[start:end]\n _v49 = _v42.disparity_image\n _v50 = _v49.header\n start = end\n end += 4\n (_v50.seq,) = _get_struct_I().unpack(str[start:end])\n _v51 = _v50.stamp\n _x = _v51\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v50.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v50.frame_id = str[start:end]\n _x = _v49\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v49.encoding = str[start:end]\n _x = _v49\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v49.data = str[start:end]\n _v52 = _v42.cam_info\n _v53 = _v52.header\n start = end\n end += 4\n (_v53.seq,) = _get_struct_I().unpack(str[start:end])\n _v54 = _v53.stamp\n _x = _v54\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v53.frame_id = str[start:end]\n _x = _v52\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v52.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v52.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v52.D = s.unpack(str[start:end])\n start = end\n end += 72\n _v52.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n _v52.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n _v52.P = _get_struct_12d().unpack(str[start:end])\n _x = _v52\n start = end\n end += 8\n (_x.binning_x, _x.binning_y,) = _get_struct_2I().unpack(str[start:end])\n _v55 = _v52.roi\n _x = _v55\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _get_struct_4IB().unpack(str[start:end])\n _v55.do_rectify = bool(_v55.do_rectify)\n _v56 = _v42.roi_box_pose\n _v57 = _v56.header\n start = end\n end += 4\n (_v57.seq,) = _get_struct_I().unpack(str[start:end])\n _v58 = _v57.stamp\n _x = _v58\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n 
_v57.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v57.frame_id = str[start:end]\n _v59 = _v56.pose\n _v60 = _v59.position\n _x = _v60\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v61 = _v59.orientation\n _x = _v61\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v62 = _v42.roi_box_dims\n _x = _v62\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.collision_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.collision_name = str[start:end]\n self.graspable_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.image.height, _x.image.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.camera_info.height, _x.camera_info.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.camera_info.D = s.unpack(str[start:end])\n start = end\n end += 72\n self.camera_info.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n self.camera_info.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n self.camera_info.P = _get_struct_12d().unpack(str[start:end])\n _x = self\n start = end\n end += 25\n (_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify,) = _get_struct_6IB().unpack(str[start:end])\n self.camera_info.roi.do_rectify = bool(self.camera_info.roi.do_rectify)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.meshes = []\n for i in range(0, length):\n val1 = 
shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.triangles = []\n for i in range(0, length):\n val2 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val2.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val1.triangles.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.vertices = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Point()\n _x = val2\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val1.vertices.append(val2)\n self.meshes.append(val1)\n _x = self\n start = end\n end += 56\n (_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, _x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def input_handler(data, context):\n\n if context.request_content_type == \"application/x-image\":\n payload = data.read()\n encoded_image = base64.b64encode(payload).decode(\"utf-8\")\n instance = [{\"b64\": encoded_image}]\n return json.dumps({\"instances\": instance})\n else:\n _return_error(\n 415, 'Unsupported content type \"{}\"'.format(context.request_content_type or \"Unknown\")\n )", "def deserialize(self, instream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def deserialize(self, str):\n try:\n if self.icon is None:\n self.icon = rocon_std_msgs.msg.Icon()\n if self.remappings is None:\n self.remappings = None\n if self.pairing is None:\n self.pairing = rocon_interaction_msgs.msg.Pairing()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.compatibility = str[start:end].decode('utf-8')\n else:\n self.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.display_name = str[start:end].decode('utf-8')\n else:\n self.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.description = str[start:end].decode('utf-8')\n else:\n self.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.resource_name = str[start:end].decode('utf-8')\n else:\n self.icon.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.format = str[start:end].decode('utf-8')\n else:\n self.icon.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.icon.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.remappings = []\n for i in range(0, 
length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parameters = str[start:end].decode('utf-8')\n else:\n self.parameters = str[start:end]\n start = end\n end += 4\n (self.max,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pairing.rapp = str[start:end].decode('utf-8')\n else:\n self.pairing.rapp = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.pairing.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.parameters = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.key = str[start:end].decode('utf-8')\n else:\n val1.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.pairing.parameters.append(val1)\n start = end\n end += 4\n (self.hash,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.role = str[start:end].decode('utf-8')\n else:\n self.role = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def encode_decode(self, img, img_metas):\n pass", "def decoder(self):\n pass", "def from_dict(cls, dikt) -> \"DeleteImageResponseContent\":\n return util.deserialize_model(dikt, cls)", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/synset': tf.FixedLenFeature([], tf.string),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(tf.int64),\n 'image/format': 
tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # VGG preprocessing borrowed from slim; includes data augmentation so train_with_distortion should be set to True.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n assert self.params['train_with_distortion'] == True\n is_training = True\n else:\n is_training = False\n image = vgg_preprocess_image(image, 224, 224, is_training=is_training)\n\n return image, label", "def test_read_namespaced_image_stream_image(self):\n pass", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.Hlines is None:\n self.Hlines = None\n if self.Vlines is None:\n self.Vlines = None\n if self.regions is None:\n self.regions = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 4\n (_x.image_width, _x.image_height,) = _get_struct_2H().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.Hlines = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.Hlines.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.Vlines = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.Vlines.append(val1)\n start = end\n end += 2\n (self.PFPS,) = _get_struct_H().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.regions = []\n for i in range(0, length):\n val1 = cooperative_driving_vision.msg.Region()\n _v3 = val1.color\n _x = _v3\n start = end\n end += 16\n (_x.r, _x.g, _x.b, _x.a,) = _get_struct_4f().unpack(str[start:end])\n _v4 = val1.moment\n _x = _v4\n start = end\n end += 40\n (_x.m00, _x.m10, _x.m01, _x.m11, _x.m20, _x.m02, _x.m21, _x.m12, _x.m30, _x.m03,) = _get_struct_10f().unpack(str[start:end])\n self.regions.append(val1)\n _x = self\n start = end\n end += 4\n (_x.box_width, _x.box_height,) = _get_struct_2H().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decoder(self, value) -> Tuple:\n data = self.decode(value)\n # TODO: remove hardcoded value.\n image_id = 1.0\n image = data[\"image\"]\n boxes = data[\"groundtruth_boxes\"]\n classes = data[\"groundtruth_classes\"]\n return (image_id, image, boxes, classes)", "def process(self, image):", "def getimage(self):", "def parse(self, data_payload):\n self.id = data_payload.get('id')\n self.name = data_payload.get('title')\n images = data_payload.get('images')\n preview_gif = images.get('preview_gif')\n original = images.get('original')\n self.url = original.get('url')\n self.preview = preview_gif.get('url')\n return self", 
"def test1234():\n r = request\n #\n data = uncompress_nparr(r.data) #uncompress data\n print(\"data type:{}\", type(data))\n #nparr = np.frombuffer(r.data, np.uint8)\n\n is_success, buffer = cv2.imencode(\".jpg\", data)\n io_buf = io.BytesIO(buffer)\n decode_img = cv2.imdecode(np.frombuffer(io_buf.getbuffer(), np.uint8), -1) # image\n #img = cv2.imdecode(nparr , cv2.IMREAD_COLOR)\n img_name = \"Received_JuanJoxe{}.png\".format(img_counter)\n\n cv2.imwrite(os.path.join(uploads_dir, img_name), decode_img)\n\n #\n data10 = data*10\n print(\"\\n\\nReceived array (compressed size = \"+\\\n str(r.content_length)+\"):\\n\"+str(data))\n resp, _, _ = compress_nparr(data)\n response = {'message': 'image received. size={}x{} name:{}'.format(decode_img.shape[1], decode_img.shape[0], img_name)} #this is json\n print('message image received. size={}x{} name:{}'.format(decode_img.shape[1], decode_img.shape[0], img_name))\n\n\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/fixation_pt': tf.FixedLenFeature([2], tf.float32)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # Convert from uint8 -> float32 and map onto range [0, 1].\n image = tf.cast(image, tf.float32) * (1. 
/ 255)\n\n # Standardize image.\n image = tf.image.per_image_standardization(image)\n\n # Apply data augmentation.\n if (self.mode == tf.estimator.ModeKeys.TRAIN\n and self.params['train_with_distortion']):\n # Randomly flip the image, zero-pad with four pixels along\n # each edge, and take a random 32 x 32 crop.\n image = tf.image.random_flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)\n image = tf.image.crop_to_bounding_box(image,\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n 32, 32)\n\n return image, label", "def deserialize(self, data):\n return NotImplementedError", "def decode_with_esponce(img):\n h = httplib2.Http()\n resp, content = h.request(ESPONCE_URL, \"POST\", img.read())\n content = json.loads(content)\n return content.get(\"content\")", "def deserialize(self, value):\n raise NotImplementedError", "def input_handler(data, context):\n if context.request_content_type == 'application/x-image':\n payload = data.read()\n\n img = Image.open(io.BytesIO(payload))\n img = img.convert('RGB')\n img = img.resize((IMG_SIZE, IMG_SIZE), Image.NEAREST)\n img_array = image.img_to_array(img)\n img_array = img_array.astype(np.uint8)\n \n img_preprocessed = preprocess_input(img_array)[None, :]\n\n return json.dumps({\"instances\": np.array(img_preprocessed).tolist()})\n else:\n _return_error(415, 'Unsupported content type was \"{}\"'.format(\n context.request_content_type or 'Unknown'))", "def unmarshal(self):\n ...", "def parse_train(self, proto, height, width):\n _, sequence_parsed = tf.io.parse_single_sequence_example(\n proto,\n context_features=self._context_features,\n sequence_features=self._sequence_features)\n\n # Deserialize images to float32 tensors.\n images = tf.map_fn(\n _deserialize_png, sequence_parsed['images'], dtype=tf.float32)\n\n # Resize images.\n if height is not None and width is not None:\n images = smurf_utils.resize(images, height, width, is_flow=False)\n\n return {'images': images}", "def deserializer():\n return bytes.decode", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def deserialize(self, blob):\n return json.loads(blob)", "def test_read_namespaced_image_stream(self):\n pass", "def __init__(self, content_type=\"file-path/raw-bytes\"):\n super(DataSerializer, self).__init__(content_type=content_type)", "def __init__(self, image_dir, instances_json, classes_file, image_size=(64, 64), mask_size=16,\n normalize_images=True, max_samples=None, min_object_size=0.01,\n min_objects_per_image=1, max_objects_per_image=8,\n include_other=False, instance_whitelist=None):\n super(Dataset, self).__init__()\n\n self.image_dir = image_dir\n self.mask_size = mask_size\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.set_image_size(image_size)\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n self.classes = []\n annotations = list(paths.list_files(os.path.join(instances_json), validExts=(\".xml\")))\n\n # with open(instances_json, 'r') as f:\n # instances_data = json.load(f)\n\n self.image_ids = []\n self.image_id_to_filename = {}\n self.image_id_to_size = {}\n new_image_ids = []\n self.image_id_to_objects = 
defaultdict(list)\n\n for j, ann in enumerate(annotations):\n\n tree = ET.parse(ann)\n anno_xml = tree.getroot()\n # anno_json = open(ann, 'r')\n # image_id = anno_xml.find('path').text\n image_id = j\n filename = anno_xml.find('filename').text\n size = anno_xml.findall('size')[0]\n width = size.find('width').text\n height = size.find('height').text\n self.image_ids.append(image_id)\n self.image_id_to_filename[image_id] = filename\n self.image_id_to_size[image_id] = (width, height)\n\n cls = open(classes_file, 'r')\n\n object_idx_to_name = {}\n all_instance_categories = []\n for i, category_data in enumerate(cls):\n category_id = i\n category_name = category_data\n all_instance_categories.append(str(category_name[:-1]))\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n\n if instance_whitelist is None:\n instance_whitelist = all_instance_categories\n category_whitelist = set(instance_whitelist)\n\n for object_data in anno_xml.findall('object'):\n bndbox = object_data.findall('bndbox')[0]\n xmin = bndbox.find('xmin').text\n ymin = bndbox.find('ymin').text\n xmax = bndbox.find('xmax').text\n ymax = bndbox.find('ymax').text\n w = int(xmax) - int(xmin)\n h = int(ymax) - int(ymin)\n # _, _, w, h = object_data['bndbox']\n # Esto no se que es lo que hace exactamente\n W, H = self.image_id_to_size[image_id]\n W = int(W)\n H = int(H)\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_data.find('name').text\n\n if object_name not in self.classes:\n self.classes.append(object_name)\n object_data.find('name').set(\"id\", str(self.classes.index(object_name)))\n # object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n name_to_idx = self.vocab['object_name_to_idx']\n # assert len(name_to_idx) == len(set(name_to_idx.values()))\n max_object_idx = max(name_to_idx.values())\n idx_to_name = ['NONE'] * (1 + max_object_idx)\n for name, idx in self.vocab['object_name_to_idx'].items():\n idx_to_name[idx] = name\n self.vocab['object_idx_to_name'] = idx_to_name\n self.num_objects = len(self.vocab['object_idx_to_name'])\n\n # Prune images that have too few or too many objects\n total_objs = 0\n for image_id in self.image_ids:\n # Hay que comprobar o cambiar esto a un id numerico por que al ser string no puede usarse como clave o asi para esto y da error. 
Investigar que se puede hacer con esto\n num_objs = len(self.image_id_to_objects[image_id])\n total_objs += num_objs\n if min_objects_per_image <= num_objs <= max_objects_per_image:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n self.vocab['pred_idx_to_name'] = [\n '__in_image__',\n 'left of',\n 'right of',\n 'above',\n 'below',\n 'inside',\n 'surrounding',\n ]\n self.vocab['pred_name_to_idx'] = {}\n for idx, name in enumerate(self.vocab['pred_idx_to_name']):\n self.vocab['pred_name_to_idx'][name] = idx", "def __call__(self, results):\r\n if isinstance(results['img'], str):\r\n results['filename'] = results['img']\r\n results['ori_filename'] = results['img']\r\n else:\r\n results['filename'] = None\r\n results['ori_filename'] = None\r\n img = mmcv.imread(results['img'])\r\n results['img'] = img\r\n results['img_fields'] = ['img']\r\n results['img_shape'] = img.shape\r\n results['ori_shape'] = img.shape\r\n return results", "def __attrs_post_init__(self):\n self.key = uuid.uuid4().hex\n if self.properties is None:\n self.properties = {}\n if self.is_image:\n try:\n img_size = Image.open(self.open()).size\n self.properties.update(width=img_size[0], height=img_size[1])\n except IOError:\n self.content_type = 'application/octet-stream'", "def __init__(self,config,typ='train'):\n\n self._config = config\n self.type = typ\n self.reader = JsonlReader(self._config.annotations.as_dict()[typ])\n self.annotations = self.reader.read()\n self.transform = get_image_processor(self._config.image_processor)", "def parse_path_image(proto_path, instance, model_path):\n\n # split the file name into parts\n name_parts = proto_path.split('_')\n # get the base path for the current model\n base_path = eval('instance.request.user.profile.' + model_path)\n # get the different parameters from the model\n # get the date\n date = datetime.datetime.strptime(name_parts[0], '%Y%m%d')\n\n # get the animal\n animal = Mouse.objects.get(mouse_name='_'.join(name_parts[1:4]))\n\n # get the region\n region = name_parts[4]\n\n # define the path for the different files\n bfpath = join(base_path, '_'.join((name_parts[0], animal.mouse_name, 'BF', region)) + '.tif')\n flpath = bfpath.replace('BF', 'FL')\n flgreenpath = bfpath.replace('BF', 'FLgreen')\n otherpath = bfpath.replace('BF', 'OTHER')\n\n return {'owner': instance.request.user,\n 'mouse': animal,\n 'window_date': date,\n 'bfPath': bfpath,\n 'flPath': flpath,\n 'flgreenPath': flgreenpath,\n 'otherPath': otherpath,\n 'region': region}", "def _handle_image(self, image_msg):\n # converting the ROS image message to CV2-image\n image = self._cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')\n\n # Skip if image is None\n if image is None:\n rospy.logdebug(\"Image content is None :(\", logger_name=\"vision\")\n return\n\n # Check if its the first image callback\n if self._first_image_callback:\n # Check if a cap may be on the camera\n self._handle_forgotten_camera_cap(image)\n\n # Instances that should be notified with the new image\n internal_image_subscribers =[\n self._field_color_detector,\n self._white_color_detector,\n self._red_color_detector,\n self._blue_color_detector,\n self._unknown_obstacle_detector,\n self._field_boundary_detector,\n self._obstacle_detector,\n self._red_obstacle_detector,\n self._blue_obstacle_detector,\n self._goalpost_detector,\n self._line_detector,\n self._ball_detector,\n self._debug_image_creator,\n ]\n\n # Distribute the image to the detectors\n # Iterate over subscribers\n for vision_object in internal_image_subscribers:\n # Send 
image\n vision_object.set_image(image)\n\n # Check if the vision should run the conventional and neural net part parallel\n if self._config['vision_parallelize']:\n # Create and start threads for conventional calculation and neural net\n #fcnn_thread = Thread(target=self._ball_detector.compute)\n\n conventional_thread = Thread(target=self._conventional_precalculation())\n\n conventional_thread.start()\n #fcnn_thread.start()\n\n # Wait for both threads\n conventional_thread.join()\n #fcnn_thread.join()\n else:\n # Calc conventional calculation and neural net\n self._ball_detector.compute()\n self._conventional_precalculation()\n\n ########\n # Ball #\n ########\n\n # Get a number of top balls under the field boundary, which have an high enough rating\n all_balls = self._ball_detector.get_top_candidates(count=self._max_balls)\n balls_under_field_boundary = \\\n self._field_boundary_detector.candidates_under_convex_field_boundary(\n all_balls,\n self._ball_candidate_y_offset)\n top_balls = candidate.Candidate.rating_threshold(\n balls_under_field_boundary,\n self._ball_candidate_threshold)\n # check whether there are ball candidates\n if top_balls:\n # Convert ball cancidate list to ball message list\n list_of_balls = map(ros_utils.build_ball_msg, top_balls)\n # Create balls msg with the list of balls\n balls_msg = ros_utils.build_balls_msg(image_msg.header, list_of_balls)\n # Publish balls\n self._pub_balls.publish(balls_msg)\n\n # Debug draw all ball candidates\n self._debug_image_creator.draw_ball_candidates(\n all_balls,\n (0, 0, 255))\n # Debug draw possible ball candidates under the field boundary\n self._debug_image_creator.draw_ball_candidates(\n balls_under_field_boundary,\n (0, 255, 255))\n # Debug draw top ball candidate\n self._debug_image_creator.draw_ball_candidates(\n top_balls,\n (0, 255, 0),\n thickness=2)\n\n #############\n # Obstacles #\n #############\n\n # Init list for obstacle msgs\n list_of_obstacle_msgs = []\n # Add red obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_MAGENTA,\n self._red_obstacle_detector.get_candidates()))\n # Add blue obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_CYAN,\n self._blue_obstacle_detector.get_candidates()))\n # Add UFO's (Undefined Found Obstacles)\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.UNDEFINED,\n self._unknown_obstacle_detector.get_candidates()))\n # Build obstacles msgs containing all obstacles\n obstacles_msg = ros_utils.build_obstacle_array_msg(image_msg.header, list_of_obstacle_msgs)\n # Publish obstacles\n self._pub_obstacle.publish(obstacles_msg)\n\n # Debug draw unknown obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._unknown_obstacle_detector.get_candidates(),\n (0, 0, 0),\n thickness=3)\n # Debug draw red obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._red_obstacle_detector.get_candidates(),\n (0, 0, 255),\n thickness=3)\n # Debug draw blue obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._blue_obstacle_detector.get_candidates(),\n (255, 0, 0),\n thickness=3)\n\n ########\n # Goal #\n ########\n\n # Get all goalposts under field boundary\n goal_posts = self._field_boundary_detector.candidates_under_convex_field_boundary(\n self._goalpost_detector.get_candidates(),\n self._goal_post_field_boundary_y_offset)\n\n # Get goalpost msgs and add them to the detected goal posts list\n goal_post_msgs = 
ros_utils.build_goal_post_msgs(goal_posts)\n # Create goalposts msg\n goal_posts_msg = ros_utils.build_goal_post_array_msg(image_msg.header, goal_post_msgs)\n # Check if there is a goal\n if goal_posts_msg:\n # If we have a goal, lets publish it\n self._pub_goal_posts.publish(goal_posts_msg)\n\n # Debug draw all goal posts\n self._debug_image_creator.draw_obstacle_candidates(\n self._goalpost_detector.get_candidates(),\n (180, 180, 180),\n thickness=3)\n # Debug draw goal posts which start in the field\n self._debug_image_creator.draw_obstacle_candidates(\n goal_posts,\n (255, 255, 255),\n thickness=3)\n\n #########\n # Lines #\n #########\n if self._use_line_points:\n # Build a LineSegmentInImage message for each linepoint\n line_points = self._line_detector.get_linepoints()\n # Create line segments\n line_segments = ros_utils.convert_line_points_to_line_segment_msgs(line_points)\n # Create line msg\n line_msg = ros_utils.build_line_information_in_image_msg(image_msg.header, line_segments)\n # Publish lines\n self._pub_lines.publish(line_msg)\n\n # Draw debug line points\n self._debug_image_creator.draw_points(\n line_points,\n (0, 0, 255))\n\n if self._use_line_mask:\n # Define detections (Balls, Goal Posts) that are excluded from the line mask\n excluded_objects = top_balls + goal_posts\n # Get line pixel mask\n line_mask = self._line_detector.get_line_mask_without_other_objects(excluded_objects)\n # Create line mask message\n line_mask_message = ros_utils.build_image_msg(image_msg.header, line_mask, '8UC1')\n # Publish line mask\n self._pub_line_mask.publish(line_mask_message)\n\n # Draw debug line mask\n self._debug_image_creator.draw_mask(\n line_mask,\n color=(255, 0, 0),\n opacity=0.8)\n\n ##################\n # Field boundary #\n ##################\n\n # Get field boundary msg\n convex_field_boundary = self._field_boundary_detector.get_convex_field_boundary_points()\n # Build ros message\n convex_field_boundary_msg = ros_utils.build_field_boundary_polygon_msg(image_msg.header, convex_field_boundary)\n # Publish field boundary\n self._pub_convex_field_boundary.publish(convex_field_boundary_msg)\n\n # Debug draw convex field boundary\n self._debug_image_creator.draw_field_boundary(\n convex_field_boundary,\n (0, 255, 255))\n # Debug draw field boundary\n self._debug_image_creator.draw_field_boundary(\n self._field_boundary_detector.get_field_boundary_points(),\n (0, 0, 255))\n\n #########\n # Debug #\n #########\n '''\n if self._config['neural_network_type'] == 'fcnn':\n # Publish fcnn output for the region of interest under the field boundary (for the world model)\n if self._ball_fcnn_publish_output:\n roi_msg = ros_utils.build_fcnn_region_of_interest(\n self._ball_detector.get_fcnn_output(),\n self._field_boundary_detector,\n image_msg.header,\n self._config['ball_fcnn_publish_field_boundary_offset'])\n self._pub_ball_fcnn.publish(roi_msg)\n\n # Publish whole fcnn output for debug purposes\n if self._publish_fcnn_debug_image:\n self._pub_debug_fcnn_image.publish(self._ball_detector.get_debug_image())\n '''\n # Check, if HSV mask images should be published\n if self._publish_HSV_mask_image:\n # Mask images\n white_mask = self._white_color_detector.get_mask_image()\n red_mask = self._red_color_detector.get_mask_image()\n blue_mask = self._blue_color_detector.get_mask_image()\n\n # Publish mask images\n self._pub_white_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, white_mask, '8UC1'))\n self._pub_red_mask_image.publish(\n 
ros_utils.build_image_msg(image_msg.header, red_mask, '8UC1'))\n self._pub_blue_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, blue_mask, '8UC1'))\n\n # Check, if field mask image should be published\n if self._publish_field_mask_image:\n if isinstance(self._field_color_detector, color.DynamicPixelListColorDetector):\n # Mask image\n dyn_field_mask = self._field_color_detector.get_mask_image()\n static_field_mask = self._field_color_detector.get_static_mask_image()\n # Publish mask image\n self._pub_dynamic_color_lookup_table_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, dyn_field_mask, '8UC1'))\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, static_field_mask, '8UC1'))\n else:\n # Mask image\n field_mask = self._field_color_detector.get_mask_image()\n # Publish mask image\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, field_mask, '8UC1'))\n\n # Check if we should draw debug image\n if self._debug_image_creator.active:\n # publish debug image\n self._pub_debug_image.publish(\n ros_utils.build_image_msg(\n image_msg.header,\n self._debug_image_creator.get_image(),\n 'bgr8'))", "def toObj(self):\n obj = {}\n obj[\"name\"] = Image.descorize(self.name)\n obj[\"size\"] = Image.getSize(self.img.size)\n obj[\"url\"] = self.img.url\n obj[\"thumbnailUrl\"] = self.img.url\n obj[\"deleteUrl\"] = \"/delete/\" + str(self.imgHash) + \"/\"\n obj[\"deleteType\"] = \"POST\"\n obj[\"contentType\"] = self.contentType\n # Sunday Oct 26, 2014 - 06:13:57 PM\n obj[\"dateAdded\"] = self.dateAdded.strftime(\"%A %b %d, %Y - %I:%M:%S %p\")\n \n return obj", "def deserialize_numpy(self, str, numpy):\n try:\n if self.objects is None:\n self.objects = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.objects = []\n for i in range(0, length):\n val1 = vision_msgs.msg.ClassifiedObject()\n _v10 = val1.header\n start = end\n end += 4\n (_v10.seq,) = _struct_I.unpack(str[start:end])\n _v11 = _v10.stamp\n _x = _v11\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v10.frame_id = str[start:end].decode('utf-8')\n else:\n _v10.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object_class = str[start:end].decode('utf-8')\n else:\n val1.object_class = str[start:end]\n start = end\n end += 4\n (val1.confidence,) = _struct_f.unpack(str[start:end])\n _v12 = val1.roi\n _x = _v12\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _struct_4IB.unpack(str[start:end])\n _v12.do_rectify = bool(_v12.do_rectify)\n self.objects.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.image is None:\n self.image = autonavigation.msg.Image()\n end = 0\n _x = self\n start = end\n end += 29\n (_x.unique_key, _x.gps_week, _x.gps_millisecond, _x.video_id, _x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _struct_2IQB3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8')\n else:\n 
self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 212\n (_x.image.localPose.time, _x.image.localPose.dr_x, _x.image.localPose.dr_y, _x.image.localPose.dr_z, _x.image.localPose.dr_heading, _x.image.localPose.dr_roll, _x.image.localPose.dr_pitch, _x.image.localPose.lf_speed, _x.image.localPose.rf_speed, _x.image.localPose.lr_speed, _x.image.localPose.rr_speed, _x.image.localPose.rot_x, _x.image.localPose.rot_y, _x.image.localPose.rot_z, _x.image.localPose.acc_x, _x.image.localPose.acc_y, _x.image.localPose.acc_z, _x.image.localPose.batteryState, _x.image.localPose.batteryEnergy, _x.image.localPose.steer, _x.image.localPose.brake, _x.image.localPose.fuel, _x.image.localPose.trans, _x.image.localPose.VehicleState, _x.image.localPose.mode, _x.image.localPose.drStatus, _x.image.localPose.errorStatus, _x.image.localPose.emergency_flag, _x.image.localPose.hardswitch_on, _x.image.gpsPos.gps_flag, _x.image.gpsPos.gps_week, _x.image.gpsPos.gps_millisecond, _x.image.gpsPos.longitude, _x.image.gpsPos.laltitude, _x.image.gpsPos.gaussX, _x.image.gpsPos.gaussY, _x.image.gpsPos.height, _x.image.gpsPos.pitch, _x.image.gpsPos.roll, _x.image.gpsPos.azimuth, _x.image.gpsPos.northVelocity, _x.image.gpsPos.eastVelocity, _x.image.gpsPos.upVelocity, _x.image.gpsPos.positionStatus, _x.image.gpsPos.rot_x, _x.image.gpsPos.rot_y, _x.image.gpsPos.rot_z, _x.image.gpsPos.acc_x, _x.image.gpsPos.acc_y, _x.image.gpsPos.acc_z, _x.image.height, _x.image.width,) = _struct_d21i7bBI6d13i2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _struct_BI.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def images(self) -> dict:\n raise NotImplementedError", "def imagenet_parser(value, image_size, is_training):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.parse_single_example(value, keys_to_features)\n\n image_buffer = tf.reshape(parsed['image/encoded'], shape=[])\n\n xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0)\n xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0)\n # Note that ordering is (y, x)\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(bbox, [0, 2, 1])\n\n image = image_preprocessing(\n image_buffer=image_buffer,\n bbox=bbox,\n 
image_size=image_size,\n is_training=is_training\n )\n\n # Labels are in [1, 1000] range\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32)\n\n return image, label", "def from_json(cls, data):\n mapping = json.loads(data)\n # Parse the global data, of which there is only the timestamp. Even\n # though the string will contain 'UTC' (which we assert is so since we\n # can only handle UTC timestamps), strptime() will return a naive\n # datetime. We'll turn it into an aware datetime in UTC, which is the\n # only thing that can possibly make sense.\n timestamp_str = mapping['global']['generated_at']\n assert 'UTC' in timestamp_str.split(), 'timestamps must be UTC'\n naive_generated_at = datetime.strptime(timestamp_str, IN_FMT)\n generated_at=naive_generated_at.replace(tzinfo=timezone.utc)\n global_ = Bag(generated_at=generated_at)\n # Parse the images.\n images = []\n for image_data in mapping['images']:\n # Descriptions can be any of:\n #\n # * description\n # * description-xx (e.g. description-en)\n # * description-xx_CC (e.g. description-en_US)\n #\n # We want to preserve the keys exactly as given, and because the\n # extended forms are not Python identifiers, we'll pull these out\n # into a separate, non-Bag dictionary.\n descriptions = {}\n # We're going to mutate the dictionary during iteration.\n for key in list(image_data):\n if key.startswith('description'):\n descriptions[key] = image_data.pop(key)\n files = image_data.pop('files', [])\n bundles = [Bag(**bundle_data) for bundle_data in files]\n image = Image(files=bundles,\n descriptions=descriptions,\n **image_data)\n images.append(image)\n return cls(global_=global_, images=images)", "def create_resource():\n #deserializer = ImageDeserializer()\n #serializer = ImageSerializer()\n return wsgi.Resource(Controller())", "def parse_image(self, image):\n # parse the image data into a pygame surface for display or screenshot\n # raw image is BGRA\n # if image_type is segmentation, here will convert to the pre-defined color\n image.convert(self.image_type)\n\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1] # BGR -> RGB\n self.rgb_image = array\n self.pygame_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n\n self.last_image_seconds = image.timestamp\n self.last_image_frame_num = image.frame", "def full_dehydrate(self, bundle):\n # Dehydrate each field.\n if bundle.obj.obj_type() == 'image':\n obj = ImageResource()\n elif bundle.obj.obj_type() == 'wordbox':\n obj = WordBoxResource()\n else:\n return bundle\n for field_name, field_object in obj.fields.items():\n try:\n # A touch leaky but it makes URI resolution work.\n if(getattr(field_object, 'dehydrated_type', None)\n == 'related'):\n field_object.api_name = self._meta.api_name\n field_object.resource_name = obj._meta.resource_name\n\n bundle.data[field_name] = field_object.dehydrate(bundle)\n\n # Check for an optional method to do further dehydration.\n method = getattr(obj, \"dehydrate_%s\" % field_name, None)\n except:\n raise BadRequest(\"Internal error, possible problem with\"\n \" top_commnets for images\")\n\n if method:\n bundle.data[field_name] = method(bundle)\n\n bundle = obj.dehydrate(bundle)\n return bundle", "def data(self):\n return self.image", "def from_dict(cls, dikt) -> 'ImageData':\n return util.deserialize_model(dikt, cls)", "def perform_create(self, serializer):\n imgs = []\n for 
image_field, hash_field, suffix in self.image_fields:\n if serializer.validated_data.get(image_field):\n img_url = serializer.validated_data[image_field]\n img, hash_ = image_from_url(img_url)\n # Store img for `post_save` where we have access to the pk so\n # we can save img in appropriate directory.\n imgs.append((suffix, img, hash_))\n serializer.validated_data[hash_field] = hash_\n elif ((serializer.validated_data.get('type') or\n (serializer.instance and\n getattr(serializer.instance, 'type', None))) ==\n feed.COLLECTION_PROMO):\n # Remove background images for promo collections.\n serializer.validated_data[hash_field] = None\n if image_field in serializer.validated_data:\n del serializer.validated_data[image_field]\n\n obj = serializer.save()\n\n for suffix, image, hash_ in imgs:\n if image:\n i = Image.open(image)\n path = obj.image_path(suffix)\n with public_storage.open(path, 'wb') as f:\n i.save(f, 'png')\n pngcrush_image.delay(path, set_modified_on=[obj])", "def post(self):\n img, data, request = self.img, self.data, self.request\n\n # ImageImportAdmin permission is required\n if not request.user.has_permission(request, ImageImportAdminPermission.name):\n raise PermissionDenied\n\n # Validate URL and file URL\n ser_import = ImportImageSerializer(img, data=data)\n\n if not ser_import.is_valid():\n return FailureTaskResponse(request, ser_import.errors, dc_bound=self.dc_bound)\n\n if not request.user.is_staff:\n self.data.pop('dc_bound', None) # default DC binding cannot be changed when creating object\n\n img.manifest = ser_import.manifest # Load imported manifest\n img.owner = request.user # Default user (can be changed)\n img.alias = img.name # Default alias (can be changed)\n img.status = Image.OK # Set status for preliminary checks\n\n # More default fields retrieved from the downloaded image manifest\n for img_field in ('version', 'desc', 'resize', 'deploy', 'tags'):\n if img_field not in data:\n def_value = getattr(img, img_field, None)\n if def_value:\n data[img_field] = def_value\n\n # Validate data for overriding manifest info\n ser = ImageSerializer(request, img, data)\n\n if not ser.is_valid():\n return FailureTaskResponse(request, ser.errors, dc_bound=self.dc_bound)\n\n # Preliminary checks\n self._run_checks()\n # Build new manifest\n img.manifest = img.build_manifest()\n # Add URL into detail dict\n ser_data = ser.data\n dd = ser.detail_dict()\n dd.update(ser_import.detail_dict())\n\n if self.img_server:\n img.status = Image.PENDING\n img.save()\n\n if ser_import.img_file_url.startswith(self.img_server.repo_url):\n logger.info('Importing image from local image server - assuming that image exists on server')\n cmd = 'esimg update -c'\n else:\n cmd = 'esimg import -f %s' % ser_import.img_file_url\n\n return self._run_execute(LOG_IMAGE_IMPORT, cmd, stdin=img.manifest.dump(), delete_on_error=True,\n detail_dict=dd)\n else:\n img.status = Image.OK\n img.manifest_active = img.manifest\n img.save()\n\n return SuccessTaskResponse(self.request, ser_data, obj=img, msg=LOG_IMAGE_IMPORT,\n detail_dict=dd, dc_bound=self.dc_bound)", "def _deserialize_data(self):\n try:\n self._func_name, self._instance, self._args, self._kwargs = self.serializer.loads(self.data)\n except Exception as e:\n raise DeserializationError() from e", "def store_img_infos(self, msg):\n # msg is technically a ConsumerRecord that is a collections.namedtuple, see:\n # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/fetcher.py#L30\n strk = str(msg['sha1'])\n self.dict_sha1_infos[strk] = 
dict()\n for key in msg:\n # dumps json of 'img_info'\n # We actually need that only for DIG...\n if key == \"img_info\":\n self.dict_sha1_infos[strk][key] = json.dumps(msg[key])\n else:\n # discard 'img_buffer' (if it exists?...), and 'sha1'\n # if k != \"img_buffer\" and k != \"sha1\":\n # self.dict_sha1_infos[strk][k] = msg[k]\n # discard 'sha1'\n if key != \"sha1\":\n self.dict_sha1_infos[strk][key] = msg[key]", "def image(self):\n return self._image", "def __init__(self, image):\n self.image = image", "def full_dehydrate(self, bundle):\n # Dehydrate each field.\n if bundle.obj.obj_type() == 'image':\n obj = ImageResource()\n elif bundle.obj.obj_type() == 'wordbox':\n obj = WordBoxResource()\n else:\n return bundle\n for field_name, field_object in obj.fields.items():\n try:\n # A touch leaky but it makes URI resolution work.\n if getattr(field_object, 'dehydrated_type', None) == 'related':\n field_object.api_name = self._meta.api_name\n field_object.resource_name = obj._meta.resource_name\n\n bundle.data[field_name] = field_object.dehydrate(bundle)\n\n # Check for an optional method to do further dehydration.\n method = getattr(obj, \"dehydrate_%s\" % field_name, None)\n except:\n raise BadRequest(\"Internal error, possible problem with \"\n \"top_commnets for images\")\n\n if method:\n bundle.data[field_name] = method(bundle)\n\n bundle = obj.dehydrate(bundle)\n return bundle", "def _deserialize(self):\n try:\n self._as_dict = yaml.load(self.path)\n except ScannerError as e:\n raise exc.ContentSerializeError(self, self.path, e.problem)", "def parse(self):\n imset = []\n imdir = remkdir(os.path.join(self._datadir, 'images'))\n csv_actors = readcsv(os.path.join(self._datadir, 'facescrub_actors.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actors[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'male'}))\n\n csv_actresses = readcsv(os.path.join(self._datadir, 'facescrub_actresses.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actresses[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'female'}))\n\n return imset", "def parse_image_data(image_urls):\n\n # Initialize images.\n initialized_ClImages = list(map(ClImage, image_urls))\n app = ClarifaiApp(api_key=CLARIFAI_KEY)\n image_info = {}\n\n # Obtain relevant tag information\n try:\n general_response = app.models.get('general-v1.3').predict(initialized_ClImages)\n except ApiError as e:\n error = json.loads(e.response.content)\n pprint('error: {}'.format(error))\n\n else:\n for item in general_response['outputs']:\n video_id = item['input']['data']['image']['url'][23:34]\n thumbnail_tags = set()\n for tag in item['data']['concepts']:\n if tag['value'] > .9:\n tag_string = tag['name'].strip().lower()\n if not Tag.query.filter(Tag.tag == tag_string).first():\n add_tag_data(tag_string)\n thumbnail_tags.add(tag_string)\n image_info[video_id] = {'tags': thumbnail_tags}\n\n\n # Obtain nsfw score\n try:\n nsfw_response = app.models.get('nsfw-v1.0').predict(initialized_ClImages)\n except 
ApiError as e:\n error = json.loads(e.response.content)\n pprint('error: {}'.format(error))\n\n else:\n for item in nsfw_response['outputs']: #nsfw_r['outputs'] is a list\n video_id = item['input']['data']['image']['url'][23:34]\n nsfw_score = round(item['data']['concepts'][-1]['value'] * 100)\n image_info[video_id]['nsfw_score'] = nsfw_score\n\n\n # Obtain color data\n # try:\n # color_response = app.models.get('color').predict(initialized_ClImages)\n # except ApiError as e:\n # error = json.loads(e.response.content)\n # pprint('error: {}'.format(error[-100:]))\n\n # else:\n # for item in color_response['outputs']:\n # video_id = item['input']['data']['image']['url'][23:34]\n # color_tags = {}\n # for color in item['data']['colors']: # item['data']['colors'] is a list\n # if color['value'] > .2:\n # color_hex = color['w3c']['hex'].rstrip().lower()\n # color_name = color['w3c']['name'].rstrip().lower()\n # if not Color.query.filter(Color.hex_code == color_hex).first():\n # add_color_data(color_hex, color_name)\n # image_info[video_id]['colors'] = color_tags\n\n return image_info", "def __init__(self, json):\n\n self.height = json[\"height\"]\n self.width = json[\"width\"]\n self.src = json[\"src\"]", "def decode(self, img_metas, output, **kwargs):\n batch_size = len(img_metas)\n sigma = output[..., 2:]\n output = output[..., :2]\n if 'bbox_id' in img_metas[0]:\n bbox_ids = []\n else:\n bbox_ids = None\n c = np.zeros((batch_size, 2), dtype=np.float32)\n s = np.zeros((batch_size, 2), dtype=np.float32)\n image_paths = []\n score = np.ones(batch_size)\n for i in range(batch_size):\n c[i, :] = img_metas[i]['center']\n s[i, :] = img_metas[i]['scale']\n image_paths.append(img_metas[i]['image_file'])\n if 'bbox_score' in img_metas[i]:\n score[i] = np.array(img_metas[i]['bbox_score']).reshape(-1)\n if bbox_ids is not None:\n bbox_ids.append(img_metas[i]['bbox_id'])\n preds, maxvals = keypoints_from_regression(output, c, s, kwargs['img_size'])\n if self.out_sigma:\n maxvals = (1 - sigma).mean(axis=2, keepdims=True)\n all_preds = np.zeros((batch_size, preds.shape[1], 3), dtype=np.float32)\n all_boxes = np.zeros((batch_size, 6), dtype=np.float32)\n all_preds[:, :, 0:2] = preds[:, :, 0:2]\n all_preds[:, :, 2:3] = maxvals\n all_boxes[:, 0:2] = c[:, 0:2]\n all_boxes[:, 2:4] = s[:, 0:2]\n all_boxes[:, 4] = np.prod(s * 200.0, axis=1)\n all_boxes[:, 5] = score\n result = {}\n result['preds'] = all_preds\n result['boxes'] = all_boxes\n result['image_paths'] = image_paths\n result['bbox_ids'] = bbox_ids\n return result", "def image(self):\n return self.chunks.get('image')", "def onImageReceived(self, msg):\n\n self.BGR = self.bridge.imgmsg_to_cv2(msg)\n self.processImage(self.BGR)", "def deserialize(self, data, status_code):\r\n if status_code == 204:\r\n return data\r\n return serializer.Serializer(self.get_attr_metadata()).deserialize(\r\n data, self.content_type())['body']", "def deserialize(self, str):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = 
_struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v15 = val1.position\n _x = _v15\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v16 = val1.orientation\n _x = _v16\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v17 = val1.stamp\n _x = _v17\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v18 = val1.position\n _x = _v18\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v19 = val1.orientation\n _x = _v19\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v20 = val1.position\n _x = _v20\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v21 = val1.orientation\n _x = _v21\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = 
[]\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v22 = val1.position\n _x = _v22\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v23 = val1.orientation\n _x = _v23\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v24 = val1.stamp\n _x = _v24\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v25 = val1.position\n _x = _v25\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v26 = val1.orientation\n _x = 
_v26\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v27 = val1.position\n _x = _v27\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v28 = val1.orientation\n _x = _v28\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.icon is None:\n self.icon = rocon_std_msgs.msg.Icon()\n if self.remappings is None:\n self.remappings = None\n if self.pairing is None:\n self.pairing = rocon_interaction_msgs.msg.Pairing()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.compatibility = str[start:end].decode('utf-8')\n else:\n self.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.display_name = str[start:end].decode('utf-8')\n else:\n self.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.description = str[start:end].decode('utf-8')\n else:\n self.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.resource_name = str[start:end].decode('utf-8')\n else:\n self.icon.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.format = str[start:end].decode('utf-8')\n else:\n self.icon.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.icon.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.remappings = []\n for i in range(0, 
length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parameters = str[start:end].decode('utf-8')\n else:\n self.parameters = str[start:end]\n start = end\n end += 4\n (self.max,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pairing.rapp = str[start:end].decode('utf-8')\n else:\n self.pairing.rapp = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.pairing.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.parameters = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.key = str[start:end].decode('utf-8')\n else:\n val1.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.pairing.parameters.append(val1)\n start = end\n end += 4\n (self.hash,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.role = str[start:end].decode('utf-8')\n else:\n self.role = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def decompose(self, *args, **kwargs):\n return _image.image_decompose(self, *args, **kwargs)", "def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')", "def load_image(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = 
json.load(json_file)\n # height = labelmeJson['imageHeight']\n # width = labelmeJson['imageWidth']\n # shape_list = labelmeJson['shapes']\n image = self.img_b64_to_arr(labelmeJson['imageData'])\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([labelmeJson['height'], labelmeJson['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n #\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n return image", "def image_loader(fileobj):\n if isinstance(fileobj, six.string_types):\n return cv2.imread(fileobj, cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb\n elif isinstance(fileobj, bytes):\n byte_arr = bytearray(fileobj)\n else:\n byte_arr = bytearray(fileobj.read())\n \n return cv2.imdecode(np.asarray(byte_arr, dtype=np.uint8), cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 100\n (_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y,) = _get_struct_h16fh8f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _deserialize(self, data):\n\n firstInd = 0\n deserialized_data = []\n for shp in self._data_shape_list:\n if len(shp) > 1:\n shift = np.prod(shp)\n elif len(shp) == 0:\n shift = 1\n else:\n shift = shp[0]\n tmp_array = data[firstInd:firstInd+shift]\n tmp_array = tmp_array.reshape(shp)\n deserialized_data.append(tmp_array)\n firstInd += shift\n return deserialized_data", "def load_image(self, **kwargs):\n ...", "def deserialize(self, resp):\r\n return self.serializer.deserialize(resp.content, format=resp['Content-Type'])", "def deserialize(self, str):\n try:\n if self.pose is None:\n self.pose = geometry_msgs.msg.PoseWithCovariance()\n end = 0\n _x = self\n start = end\n end += 72\n (_x.detection_id, _x.confidence, _x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w,) = _get_struct_Q8d().unpack(str[start:end])\n start = end\n end += 288\n self.pose.covariance = _get_struct_36d().unpack(str[start:end])\n _x = self\n start = end\n end += 40\n (_x.height, _x.bbox_x, _x.bbox_y, _x.bbox_w, _x.bbox_h,) = _get_struct_5d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.modality = str[start:end].decode('utf-8')\n else:\n self.modality = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.embed_vector = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decodeFrame(self, image):\n return image", "def read_image_data(self):\n\n for sequence_name in 
self.sequence_name_list:\n sequence = self.sequences[sequence_name]\n for image_id in sequence.image_id_list:\n sequence.image_dict[image_id].image_path = '{}{}/{}'.format(self.root_dir, self.name, sequence.image_dict[image_id].filename)", "def serialize(self):\n return {'id':self.id,\n 'flavor':self.flavor,\n 'size':self.size,\n 'rating':self.rating,\n 'image':self.image}", "def get_image(self):\n return self.process_raw_image(self.get_raw_image())", "def _image_callback(self, image_msg):\n # type: (Image) -> None\n # Drops old images and cleans up the queue.\n # Still accepts very old images, that are most likely from ROS bags.\n image_age = rospy.get_rostime() - image_msg.header.stamp\n if 1.0 < image_age.to_sec() < 1000.0:\n rospy.logwarn(f\"Vision: Dropped incoming Image-message, because its too old! ({image_age.to_sec()} sec)\",\n logger_name=\"vision\")\n return\n\n if self._transfer_image_msg_mutex.locked():\n return\n\n with self._transfer_image_msg_mutex:\n # Transfer the image to the main thread\n self._transfer_image_msg = image_msg", "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def deserialize(self, value):\n if value == 'auto':\n return Recollection\n else:\n return self._klass.deserialize(value)", "def from_json_data(self, obj):\n self.init(obj[\"tile_width\"], obj[\"tile_height\"], obj[\"num_x\"], obj[\"num_y\"])\n self._id_to_pos = {i: tuple(obj[\"ids\"][i]) for i in obj[\"ids\"]}\n self._pos_to_id = {self._id_to_pos[i]: i for i in self._id_to_pos}\n self.color_qimage = base64_to_qimage(obj[\"png\"][\"color\"])\n self.height_qimage = base64_to_qimage(obj[\"png\"][\"height\"])", "def parse_img(image_path):\n image = tf.read_file(image_path)\n image = tf.image.decode_image(image)\n image = tf.reshape(image, [INITIAL_RES, INITIAL_RES, 3])\n image = tf.image.resize_images(image, [OUTPUT_RES, OUTPUT_RES])\n #image = image[:, :, ::-1] # BGE -> RGB conversion if needed?\n #image = tf.image.rgb_to_grayscale(image)\n #image = tf.image.convert_image_dtype(image, tf.float32) # In neuralNet.py\n image = image.eval() # Convert from 
tensor to Numpy array for Keras\n return image", "def deserialize_numpy(self, str, numpy):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.graspable_objects is None:\n self.graspable_objects = None\n if self.image is None:\n self.image = sensor_msgs.msg.Image()\n if self.camera_info is None:\n self.camera_info = sensor_msgs.msg.CameraInfo()\n if self.meshes is None:\n self.meshes = None\n if self.reference_to_camera is None:\n self.reference_to_camera = geometry_msgs.msg.Pose()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.graspable_objects = []\n for i in range(0, length):\n val1 = manipulation_msgs.msg.GraspableObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.reference_frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.reference_frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.potential_models = []\n for i in range(0, length):\n val2 = household_objects_database_msgs.msg.DatabaseModelPose()\n start = end\n end += 4\n (val2.model_id,) = _get_struct_i().unpack(str[start:end])\n _v94 = val2.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v94.key = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v94.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v94.db = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v94.db = str[start:end]\n _v95 = val2.pose\n _v96 = _v95.header\n start = end\n end += 4\n (_v96.seq,) = _get_struct_I().unpack(str[start:end])\n _v97 = _v96.stamp\n _x = _v97\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v96.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v96.frame_id = str[start:end]\n _v98 = _v95.pose\n _v99 = _v98.position\n _x = _v99\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v100 = _v98.orientation\n _x = _v100\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.detector_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2.detector_name = str[start:end]\n val1.potential_models.append(val2)\n _v101 = val1.cluster\n _v102 = _v101.header\n start = end\n end += 4\n (_v102.seq,) = _get_struct_I().unpack(str[start:end])\n _v103 = _v102.stamp\n _x = _v103\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v102.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v102.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v101.points = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point32()\n _x = val3\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])\n _v101.points.append(val3)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n _v101.channels = []\n for i in range(0, length):\n val3 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val3.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n val3.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n _v101.channels.append(val3)\n _v104 = val1.region\n _v105 = _v104.cloud\n _v106 = _v105.header\n start = end\n end += 4\n (_v106.seq,) = _get_struct_I().unpack(str[start:end])\n _v107 = _v106.stamp\n _x = _v107\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v106.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v106.frame_id = str[start:end]\n _x = _v105\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v105.fields = []\n for i in range(0, length):\n val4 = sensor_msgs.msg.PointField()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val4.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val4.name = str[start:end]\n _x = val4\n start = end\n end += 9\n (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])\n _v105.fields.append(val4)\n _x = _v105\n start = end\n end += 9\n (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])\n _v105.is_bigendian = bool(_v105.is_bigendian)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v105.data = str[start:end]\n start = end\n end += 1\n (_v105.is_dense,) = _get_struct_B().unpack(str[start:end])\n _v105.is_dense = bool(_v105.is_dense)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v104.mask = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n _v108 = _v104.image\n _v109 = _v108.header\n start = end\n end += 4\n (_v109.seq,) = _get_struct_I().unpack(str[start:end])\n _v110 = _v109.stamp\n _x = _v110\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v109.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v109.frame_id = str[start:end]\n _x = _v108\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v108.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v108.encoding = str[start:end]\n _x = _v108\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v108.data = str[start:end]\n _v111 = _v104.disparity_image\n _v112 = _v111.header\n start = end\n end += 4\n (_v112.seq,) = 
_get_struct_I().unpack(str[start:end])\n _v113 = _v112.stamp\n _x = _v113\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v112.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v112.frame_id = str[start:end]\n _x = _v111\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v111.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v111.encoding = str[start:end]\n _x = _v111\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v111.data = str[start:end]\n _v114 = _v104.cam_info\n _v115 = _v114.header\n start = end\n end += 4\n (_v115.seq,) = _get_struct_I().unpack(str[start:end])\n _v116 = _v115.stamp\n _x = _v116\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v115.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v115.frame_id = str[start:end]\n _x = _v114\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v114.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v114.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v114.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 72\n _v114.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 72\n _v114.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 96\n _v114.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12)\n _x = _v114\n start = end\n end += 8\n (_x.binning_x, _x.binning_y,) = _get_struct_2I().unpack(str[start:end])\n _v117 = _v114.roi\n _x = _v117\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _get_struct_4IB().unpack(str[start:end])\n _v117.do_rectify = bool(_v117.do_rectify)\n _v118 = _v104.roi_box_pose\n _v119 = _v118.header\n start = end\n end += 4\n (_v119.seq,) = _get_struct_I().unpack(str[start:end])\n _v120 = _v119.stamp\n _x = _v120\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v119.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v119.frame_id = str[start:end]\n _v121 = _v118.pose\n _v122 = _v121.position\n _x = _v122\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v123 = _v121.orientation\n _x = _v123\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v124 = _v104.roi_box_dims\n _x = _v124\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) 
= _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.collision_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.collision_name = str[start:end]\n self.graspable_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.image.height, _x.image.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.camera_info.height, _x.camera_info.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.camera_info.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 72\n self.camera_info.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 72\n self.camera_info.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 96\n self.camera_info.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12)\n _x = self\n start = end\n end += 25\n (_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify,) = _get_struct_6IB().unpack(str[start:end])\n self.camera_info.roi.do_rectify = bool(self.camera_info.roi.do_rectify)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.meshes = []\n for i in range(0, length):\n val1 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.triangles = []\n for i in range(0, length):\n val2 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val2.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3)\n val1.triangles.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.vertices = []\n for i in 
range(0, length):\n val2 = geometry_msgs.msg.Point()\n _x = val2\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val1.vertices.append(val2)\n self.meshes.append(val1)\n _x = self\n start = end\n end += 56\n (_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, _x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def model_processing(img):\n\n # assert isinstance(img, EmotionalImage)\n\n if str(img.name).find('json') > -1:\n return\n user = get_user(img.path + '/' + 'meta.json')\n filePath = img.path + '/' + img.name\n # print(\"---------------Processsing----------------\", img.name)\n\n features = extract_features(filePath)\n emotions = predict_emotions(features)\n uuid1 = uuid.uuid4()\n emImage = EmotionalImage(\n uuid1, img.name, img.path, features, emotions, \"\", \"\", \"\")\n user.images.append(emImage)\n # user.save()", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)", "def _process_img_semantic(self, sensor_data):\n sensor_data.convert(self.cc)\n img = np.array(sensor_data.raw_data).reshape((self.img_y, self.img_x, 4))\n img = img[:, :, :3] # sensor is actualy rgba, we dont need alpha values\n self.semantic = img # need to scale rgb values to be {0,1}" ]
[ "0.64817536", "0.6376675", "0.63246363", "0.6318897", "0.62756526", "0.62049454", "0.61352426", "0.6036559", "0.59855014", "0.59849745", "0.5978087", "0.59708434", "0.58874786", "0.58653855", "0.57746744", "0.5726982", "0.5700346", "0.56928164", "0.565674", "0.5638816", "0.56269187", "0.5617103", "0.5601748", "0.55945575", "0.5587654", "0.5569382", "0.55682033", "0.55462545", "0.55424213", "0.5533669", "0.55321616", "0.55199754", "0.55161107", "0.55122703", "0.5497061", "0.54838693", "0.54556096", "0.54409313", "0.5437712", "0.5432761", "0.54326046", "0.5417718", "0.54157245", "0.5405711", "0.5400642", "0.5398843", "0.53883773", "0.53826374", "0.53671384", "0.53581965", "0.5314091", "0.5304823", "0.53034943", "0.53033507", "0.5297565", "0.52954364", "0.5292182", "0.5290218", "0.52875006", "0.5285814", "0.52740216", "0.5272668", "0.52714086", "0.5265", "0.5261069", "0.52512413", "0.5244697", "0.5240946", "0.52361447", "0.5232256", "0.5230753", "0.52225244", "0.52184135", "0.52167726", "0.52115136", "0.520417", "0.5204028", "0.5200149", "0.5199645", "0.51729846", "0.5172723", "0.51569796", "0.51558304", "0.51553977", "0.5150183", "0.5147562", "0.51439553", "0.5136933", "0.5134238", "0.5130627", "0.5127897", "0.51276207", "0.51275635", "0.5127167", "0.51260763", "0.51208526", "0.5110632", "0.5107213", "0.5100555", "0.50977", "0.5097305" ]
0.0
-1
return img from disk
def api_get_icon(): pkg_name = request.args.get('pkg') if pkg_name: pkg_files = Database().db.get_pkg_files(pkg_name) for src in pkg_files: if src.startswith("/usr/share/icons/hicolor/32x32/apps/"): return send_file(src, as_attachment=False) return send_file("static/images/null.gif") else: src = request.args.get('i') if not os.path.isfile(src): #abort(404) return send_file("static/images/null.gif") return send_file(src, as_attachment=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_from_file(path):\n try:\n img = Image.open(path)\n return img\n except IOError as e:\n print e\n return None", "def read_image(path):\n img = misc.imread(path)\n return img", "def get_image(image_path):\r\n\r\n return Image.open(image_path)", "def getImage(filename):\n if not fs.exists(filename=filename):\n raise Exception(\"mongo file does not exist! {0}\".format(filename)) \n im_stream = fs.get_last_version(filename)\n im = Image.open(im_stream)\n img_io = BytesIO() \n im.save(img_io, 'JPEG', quality=70)\n img_io.seek(0)\n return img_io\n \n #return serve_pil_image(im)\n\n #d = ObjectId('5ad204a5c2eb5d031a7fd7e5') \n #connection = MongoClient()\n #database = connection['image']\n # create a new gridfs object.\n #fs = gridfs.GridFS(database)\n #outputdata = fs.get(d).read()\n #decode=outputdata#.decode()\n #return decode", "def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image", "def get_image(path):\n\n # Check if the picture exists or not.\n if not os.path.isfile(path):\n print('Cannot open the image. Please try again!')\n exit(1)\n\n try:\n # Open the image.\n image = Image.open(path)\n\n # If everything is okay return it.\n return image\n # If an error occurred.\n except Exception as err:\n print('Error occurred while trying to open the image:', err, 'Please try again!')\n exit(1)", "def read_image(path: str):\n return Image.open(path, mode=\"r\")", "def image(fname):\n return cv2.imread(fname)", "def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))", "def load_image(file_path):\r\n return Image.open(file_path)", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def get_image(self):\n self.drawer.flush()\n return self.img", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def img(filename='steine.jpg'):\n\treturn mpimg.imread('/content/drive/My Drive/colab/images/'+filename)", "def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img", "def imread(fname):\r\n return skimage.io.imread(fname)", "def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()", "def get_img_file(image, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. 
This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img')\n # get location title.\n loc_id = db.get_img_loc(int(image))\n if loc_id == None:\n raise ValueError('The image %s could not be found' % image)\n loc = db.get_location(loc_id)\n title = loc['title']\n # add to file name\n img_dir = path.join(img_dir, title, str(image) + '.jpg')\n return img_dir", "def make_image(self, path):\n\t\treturn None", "def get_image(self):\n return self.process_raw_image(self.get_raw_image())", "def get(self, img):\n\n return send_from_directory(\"images\", img)", "def load(path) -> Image:\n return Image.open(path)", "def read_image_from_fs(name):\n with open(name, \"rb\") as fin:\n return fin.read()", "def get_input(path):\n img = imread(path)\n return img", "def __get_image_file(self):\n if file_utils.file_exists(self.image_file_path):\n return open(self.image_file_path, 'r')\n else:\n if not os.path.exists(self.download_path):\n os.makedirs(self.download_path)\n logger.info('Found existing image file')\n return self.__download_image_file()", "def getimage(self):", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def get_image(self, image_dir_root=None):\n image_dir_root = image_dir_root or image_dir\n return Image.open(os.path.join(image_dir_root, self.name))", "def getImgContentFile(img):\n format, imgstr = img.split(';base64,')\n ext = format.split('/')[-1]\n file = ContentFile(base64.b64decode(imgstr), name='temp.' + ext)\n return file", "def getimgs():", "def read_img(img_path:str) -> object:\n img = cv2.imread(img_path)\n return img", "def getImage(url):\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n return img", "def imagefile(self):\n return os.path.join(self.__folder, self.__name + '.jpg')", "def read_img(img_path): \n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def _getImage(self, img):\n\n # lazily fill in some attributes\n if not 'local_file_path' in img:\n img['local_file_path'] = os.path.join(self.image_root, img['filename'])\n if not 'feat' in img: # also fill in the features\n # NOTE: imgid is an integer, and it indexes into features\n fn = os.path.basename(img['filename'])\n return img", "def imread(path):\n img = cv2.imread(path)\n return img", "def get_image():\n response = send_file(tempFileObj, as_attachment=True, attachment_filename='marked_image.png')\n return response", "def read(self):\n with self.lock:\n return self.image", "def get_image(self):\n return self.image", "def get_image(self):\n return self.image", "def get_image(self):\n return self.image", "def hload_pil(filepath):\n img = Image.open(filepath)\n return img", "def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img", "def get_image():\n return models.Image.objects.all()[0]", "def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img", "def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)", "def imread(path):\n with open(path, 'rb') as f:\n with PIL.Image.open(f) as img:\n return img.convert('RGB')", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s 
= open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def download_pil_image(self, url):\r\n return Image.open(urlopen(url))", "def _get_image_from_file(dir_path, image_file):\n # Save ourselves the effort if PIL is not present, and return None now\n if not PIL_ENABLED:\n return None\n # Put together full path\n path = os.path.join(dir_path, image_file)\n # Try to read the image\n img = None\n try:\n from PIL import Image\n img = Image.open(path)\n except IOError as exptn:\n print('Error loading image file %s: %s' % (path, exptn))\n # Return image or None\n return img", "def get_itk_image(path):\n\n reader = itk.ImageFileReader()\n reader.SetFileName(path)\n\n image = reader.Execute()\n\n return image", "def get_image(id_num):\n return sqldb.get_image(id_num)", "def get_file(file_info):\n if session_vars.filepath == file_info['filepath']:\n img_file = session_vars.img_file\n else:\n print('loading', file_info['filepath'])\n if file_info['ext']=='fits':\n print('Detected fits image type')\n pyfits = import_fits()\n img_file = pyfits.open(file_info['filepath'])\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img_file = Image.open(file_info['filepath'])\n session_vars.filepath = file_info['filepath']\n session_vars.img_file = img_file\n return img_file", "def getImage(file):\n image = imread(file)\n image = crop(image, 64, 64)\n image = color.rgb2gray(image)\n\n return image", "def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)", "def download(self):\n data = urllib.urlopen(self.remoteurl).read()\n s = StringIO.StringIO(data)\n return Image.open(s)", "def getImageImage(self, name: str) -> Any:\n fullname = self.getImageFinder(name)\n try:\n pixmap = QtGui.QPixmap()\n pixmap.load(fullname)\n return pixmap\n except Exception:\n g.es(\"exception loading:\", name)\n g.es_exception()\n return None", "def get_image_by_path(image_path, target_size=None):\n img = image.load_img(image_path, target_size=target_size)\n return img", "def load_img(path):\n img = cv2.imread(path)\n return img", "def read_img(path):\r\n if os.path.isfile(path):\r\n return cv2.imread(path)\r\n else:\r\n raise ValueError('hiiiiiiiiii')", "def download_image(filename):\n return ImageApiHandler.image_handler.get(filename)", "def read_image(image_path):\n if not os.path.exists(image_path):\n raise IOError('File does not exist: %s' % image_path)\n else:\n return Image.open(image_path)", "def read_img(img_path):\n img_list=[]\n print('image loading...')\n for _,_,files in os.walk(img_path):\n for f in files:\n if f.find('.dcm')>=0:\n tmp_img=dicom.dcmread(os.path.join(img_path,f))\n tmp_img=tmp_img.pixel_array#[0::2,0::2]\n img_list.append(tmp_img)\n img_data=np.array(img_list)\n print('done')\n return img_data", "def read_img(img_id, train_or_test, size):\n img = image.load_img(join(data_dir, train_or_test, img_id + '.jpg'), target_size=size)\n # img = image.img_to_array(img)\n return img", "def image(self, name=None):\n return self.find(self.images(), name=name)", "def img_in(filename):\n temp_img = Image.open(filename)\n img = np.array(temp_img)\n name = filename.split('.')[-2]\n return name, img", "def image(self):\n return 
cv2.imread(self.image_path)", "def get_from_file(self, filename):\n print \"loading from file...\"\n return cv2.imread(filename)", "def read_image(path):\n reader = sitk.ImageSeriesReader()\n dicom_filenames = reader.GetGDCMSeriesFileNames(path)\n reader.SetFileNames(dicom_filenames)\n reader.LoadPrivateTagsOn()\n img = reader.Execute()\n img.SetOrigin((0, 0, 0))\n return img", "def load_image(self):\n try:\n return Image.open(self._path, 'r')\n except IOError:\n messagebox.showerror(\"Error\", \"Wrong sprite file path!\")", "def get_img(data_path):\n img = cv2. imread(data_path)\n img = cv2.resize(img, (64, 64))\n return img", "def get_image(self):\n image = None\n if self.image_path:\n image=ImageTk.PhotoImage(ImageOps.fit(\n Image.open(resolve_path(self.image_path)),self.size or (32,32)))\n self._hack.append(image)\n\n return image", "def read_img(img_path):\n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def loadImage(name, size=0):\n path = os.path.join(PACKAGE_HOME, 'input', name)\n fd = open(path, 'rb')\n data = fd.read()\n fd.close()\n return data", "def get_image ( self, object ):\n return self.image", "def charger_image(nom):\n dir = os.path.dirname(__file__)\n return PhotoImage(file=os.path.join(dir, nom))", "def _open_img(self, img_name):\n try:\n img = Image.open(img_name)\n photo = ImageTk.PhotoImage(img)\n return photo\n except IOError:\n Debug.printi(\"Unable to find image \" + img_name, Debug.Level.ERROR)", "def get_image(name):\r\n return nova.images.find(name=name)", "def img_read(name):\n\n img = cv2.imread(name)\n\n return img", "def image(request, ef_id):\n ef = get_object_or_404(ExamFile, id=ef_id)\n thumb = get_thumbnail_path(ef)\n daimage = file(thumb, 'rb').read()\n return HttpResponse(content=daimage, mimetype='image/png')", "def _openImage(self, fname):\n image = cv2.imread(fname,0)\n\n if(image != None):\n return image\n else:\n raise IOError, \"Image file can not be opened\"", "def load_image(filename):\n return tf.gfile.FastGFile(filename, 'rb').read()", "def get_images(stage=0):\n return get_files(stage)[0]", "def load(path):\n print(\"path\", path)\n print(Path(path).is_file())\n if Path(path).is_file():\n img = image.imread(path)\n print(f\"Loading image of dimensions {img.shape[0]} x \"\n f\"{img.shape[1]}\")\n return np.array(img)\n raise FileNotFoundError", "def get_image(self):\n shop = lfs_get_object_or_404(Shop, pk=1)\n return shop.image", "def get_image():\n\n url = 'http://skyview.gsfc.nasa.gov/cgi-bin/images'\n params = dict(Position='%s,%s' % (source['ra'], source['dec']),\n Survey=source['survey'].val,\n Return='GIF')\n response = requests.get(url, params=params, stream=True)\n with open(files['image.gif'].rel, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * 
self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def _read_image_from_file(file_name):\n image_file = open(file_name, 'rb')\n image = image_file.read()\n image_file.close()\n return image", "def load_image(image_path):\n image = io.imread(image_path)\n io.imshow(image)\n io.show()\n print(\"Size of the image is {} KB\".format(round(os.path.getsize(image_path)/1024,2)))\n return image", "def get_image(self, imnames, idx):\r\n path = os.path.join(self.img_path, imnames[idx])\r\n return Image.open(path).convert('RGB')", "def read_image(img_path):\n\tgot_img = False\n\twhile not got_img:\n\t\ttry:\n\t\t\timg = Image.open(img_path).convert('RGB')\n\t\t\timg = img.resize((100,100),Image.ANTIALIAS)\n\t\t\tgot_img = True\n\t\texcept IOError:\n\t\t\tprint(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n\t\t\tpass\n\treturn img", "def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)", "def get_img(self, onehot=False):\n if self.obs_vision:\n img = self.gen_obs(onehot=False)\n img = self.get_obs_render(img, CELL_PIXELS // 4)\n else:\n img = self.gen_obs(onehot=onehot)\n return img", "def read_image(img_path):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n pass\n return img", "def get_img(path):\n # imread function converts an image to a 2d grayscale array\n img = imread(path, as_gray=True).astype(int)\n\n # resize function resize image to a specific size;\n img = resize(img, (height, width), anti_aliasing=True, preserve_range=True)\n\n return img", "def downloadImage(self, url):\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n data = response.read()\n io = cStringIO.StringIO(data)\n return PIL.Image.open(io)", "def _image(filename):\n return TK.PhotoImage(file=filename)", "def read_image(self, filePath):\n if filePath.endswith(\".dcm\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.expand_dims(image[0,:,:], -1)\n elif filePath.endswith(\".png\"):\n image = cv2.imread(filePath)\n image = np.array(image, dtype = \"int16\")\n elif filePath.endswith(\".mha\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.transpose(image,(1,2,0))\n return image", "def image(self, where):\n cook = cookie()\n I = Image(cook, self)\n self.call('image', cook, where)\n print(\"IMAGE\", where)\n return I", "def deserialize_image(self, data, give_file_name):\r\n # Generate a random 8-character name\r\n # name = \"img_\" + self.generate_random_name() + \".png\"\r\n name = give_file_name + \".png\"\r\n file_path = os.path.join(self.temp_dir, name)\r\n img = Image.frombytes(data['mode'], data['size'], data['pixels'])\r\n img.save(file_path)\r\n return file_path", "def imageGet(soup):\n img = soup.find('img', class_='a-hidden')\n img = str(img)\n imgURL = re.findall('https?://.+jpg', img)\n response = requests.get(imgURL[0])\n photo = Image.open(BytesIO(response.content))\n img = imgURL[0]\n\n return img" ]
[ "0.71957695", "0.70855176", "0.70432746", "0.7038061", "0.68815845", "0.6864167", "0.6857217", "0.68482894", "0.6821152", "0.6813624", "0.68091524", "0.67426455", "0.6739715", "0.6640925", "0.66380155", "0.66144145", "0.6573041", "0.65571743", "0.6553333", "0.6547051", "0.65463376", "0.6533191", "0.65323126", "0.65275234", "0.6511827", "0.649794", "0.6483717", "0.647083", "0.6422197", "0.6382685", "0.63721555", "0.63514286", "0.63354045", "0.63143295", "0.6312939", "0.6310827", "0.62970304", "0.6296265", "0.6285701", "0.6285701", "0.6285701", "0.62853014", "0.6260105", "0.62559307", "0.62557393", "0.62513936", "0.62400997", "0.6228803", "0.6228799", "0.6228086", "0.6225129", "0.6217294", "0.6216632", "0.6215492", "0.6206801", "0.61945224", "0.61940974", "0.61901224", "0.6186268", "0.6184338", "0.61838937", "0.61795145", "0.61787647", "0.61616075", "0.61570174", "0.615378", "0.61502403", "0.6145539", "0.61441296", "0.6137704", "0.6126502", "0.6113352", "0.61129713", "0.61089", "0.61087686", "0.6108614", "0.6103578", "0.61031157", "0.6102145", "0.60988265", "0.60963374", "0.60937405", "0.60889715", "0.6088009", "0.60869443", "0.60869104", "0.60851336", "0.6081994", "0.6077438", "0.6068802", "0.6067441", "0.6059155", "0.6055772", "0.60489386", "0.60452354", "0.60257006", "0.6025011", "0.6024814", "0.60235023", "0.6019663", "0.60113955" ]
0.0
-1
page list one repo
def get_repo(repo_id):
    if repo_id == "orphans":
        pkgs = Database().db.get_orphans()
    else:
        pkgs = Database().db.get_repo_pkgs(repo_id)
    return render_template("repo.html",
                           title=" - "+repo_id,
                           repos=Database().db.get_repos_names(),
                           pkgs=pkgs,
                           repo=repo_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_repos_cli(api_client, path_prefix, next_page_token):\n content = ReposApi(api_client).list(path_prefix, next_page_token)\n click.echo(pretty_format(content))", "def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)", "def repos(request):\n # Clean up garbage created by buggy edits\n bad_branch_keys = models.Branch.query(models.Branch.owner == None).fetch(\n 100, keys_only=True)\n if bad_branch_keys:\n ndb.delete_multi(bad_branch_keys)\n repo_map = {}\n for repo in models.Repository.query().fetch(1000, batch_size=100):\n repo_map[repo.key] = repo\n branches = []\n for branch in models.Branch.query().fetch(2000, batch_size=100):\n repo_key = branch.repo_key\n if repo_key in repo_map:\n branch.repository = repo_map[repo_key]\n branches.append(branch)\n branches.sort(key=lambda b: map(\n unicode.lower, (b.repository.name, b.category, b.name)))\n return respond(request, 'repos.html', {'branches': branches})", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def info(id):\n sql = \"select distinct name, description, stars, url, last_push_date, repo_id, created_date, avatar from python_repos where repo_id=\"+id\n db = get_db()\n cursor = db.execute(sql)\n repo_info = cursor.fetchall()\n return render_template('repo.html',info=repo_info)", "def show_entries():\n db = get_db()\n cur = db.execute('select distinct name,repo_id,stars, description from python_repos order by stars desc')\n entries = cur.fetchall()\n # get api\n results = get_api()\n # The update operation will consist of deletion and insertion for efficiency\n delete_entry(results)\n add_entry(results)\n return render_template('index.html', entries=entries)", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )", "def repo_info(self, attempt=1):\n\n response = self.postman.request('repo_list', page=attempt)\n\n if (response.status_code == requests.codes.ok):\n if (len(response.json()) != 0):\n for repo in response.json():\n self.repo_list.append(repo['name'])\n\n self.repo_info(attempt=attempt + 1)", "def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def repository_show(ctx: click.Context, repository_name):\n subcommand_repository.cmd_show(ctx.obj, repository_name)", "def repos():\n print(\"\\nThe following repos are available.\\n\")\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"INDEX_SHELF\")))\n\n print(\"{:<4} {:<20} {:<}\".format(\"Key\", \"| Name\", \"| Path\"))\n 
print(\"******************************************\")\n for key in INDEX_SHELF.keys():\n name = INDEX_SHELF[key]\n print(\"{:<4} {:<20} {:<}\".format(key, name, str(NAME_SHELF[name])))\n INDEX_SHELF.close()\n NAME_SHELF.close()", "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "def do_list(cs, args):\n data = []\n _, repositories = cs.repositories.list(args.project_id)\n for repo in repositories:\n _, tags = cs.repositories.list_tags(repo)\n for tag in tags:\n _, manifests = cs.repositories.get_manifests(repo, tag)\n manifests['Name'] = repo\n manifests['Tag'] = tag\n manifests['Project'] = args.project_id\n manifests['Id'] = manifests['Id'][0:12]\n data.append(manifests)\n fields = [\n \"Id\", \"Name\", \"Tag\", \"Author\", 'Project', \"Created\", \"Docker Version\",\n \"Architecture\", \"OS\"\n ]\n utils.print_list(data, fields, sortby=args.sortby)", "def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url", "def do_show(cs, args):\n repo = args.repository\n tag_index = repo.find(':')\n if tag_index != -1:\n tag = repo[tag_index + 1:]\n repo = repo[:tag_index]\n else:\n tag = \"latest\"\n if repo.find('/') == -1:\n repo = \"library/\" + repo\n _, data = cs.repositories.get_manifests(repo, tag)\n utils.print_dict(data)", "def test_dors_repository_index(self):\n USER_NAME = \"doryalo\"\n self.driver.get(\"https://github.com/\")\n self.main_page = page.MainPage(self.driver)\n self.main_page.commit_search(USER_NAME)\n self.search_result_page = page.SearchResultsPage(self.driver)\n self.search_result_page.filter_results_by_user_name()\n self.search_result_page.navigate_to_user_page(USER_NAME)\n self.user_page = page.UserProfilePage(self.driver)\n self.analyze_all_repositories(repositories_list=self.user_page.get_all_repositories_elements())", "def main():\n# logging.basicConfig(level=logging.DEBUG)\n try:\n user = sys.ARGV[1]\n except:\n user = 'hmm01i'\n repos = getRepos(user)\n print(\"%i Personal Repos\" % len(repos))\n logging.debug(repos)\n #print(\"Repo,[size, last update]\")\n #for k in repos.keys():\n # print(str(k),repos[k])", "def list_prs(service, repo):\n a = App()\n if repo:\n s = a.get_service(service, repo=repo)\n else:\n s = a.guess_service()\n prs = s.list_pull_requests()\n if not prs:\n print(\"No open pull requests.\")\n return\n print(tabulate([\n (\n \"#%s\" % pr['id'],\n pr['title'],\n \"@%s\" % pr['author'],\n pr['url']\n )\n for pr in prs\n ], tablefmt=\"fancy_grid\"))", "def _git_show(self, path, ref=\"HEAD\"):\n res = requests.get(\n \"/\".join([self.loc, ref, path]),\n auth=HTTPBasicAuth(self.username, self.password)\n )\n\n if res.status_code // 100 != 2:\n return None\n\n if res.headers['Content-Type'] == 'application/json':\n res = json.loads(res.content)\n # cache existence info about all directories shown!\n if path != \"talus/pypi/simple\" and res[\"type\"] == \"listing\":\n self._add_to_cache(path, items=res[\"items\"])\n else:\n res = res.content\n\n return res", "def get_details(self, repo=None):\n api_json = []\n\n #get all branches from this repo\n branches = self.make_branches(self.getBranch(repo))\n\n today = datetime.date.today()\n yesterday = today - 
datetime.timedelta(2)\n\n for branch in branches:\n args = {\"per_page\": \"100\",\n \"sha\": branch,\n \"author\": self.username,\n \"since\": yesterday.isoformat()}\n args = self.make_args(args)\n repo_url = \"/\".join([self.url, \"repos\", repo, \"commits\"])\n repo_url = repo_url + args\n\n request = urllib2.Request(repo_url, headers=self.headers)\n response = urllib2.urlopen(request)\n raw_data = response.read()\n commits_info = self.process_factory(simplejson.loads(raw_data))\n api_json = api_json + commits_info\n\n print repo_url\n\n print api_json\n return api_json", "def get_repo_options(account, **kwargs):\n client = AsyncHTTPClient()\n uri = \"https://api.github.com/user/repos?per_page=100\"\n data = []\n while uri is not None:\n req = account.get_request(uri, headers={\"Accept\": \"application/vnd.github.moondragon+json\"})\n response = yield client.fetch(req)\n response_object = json.loads(response.body.decode('utf-8'))\n data += response_object\n links = parse_link_header(response.headers.get('Link', ''))\n uri = links.get('next', None)\n return [{\"title\": repo['full_name'], \"value\": repo['full_name']}\n for repo in data]", "def repo_info():\n return TEST_REPOS_INFO[0]", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def list_repo_cards(self, repo):\n # check for permission\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'read')\n\n # get the relevant cards\n cards = Card.objects.all().filter(\n repo_base=self.repo_base, repo_name=repo)\n cards = sorted([c.card_name for c in cards])\n return cards", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def display_repos_and_commits(github_id):\r\n\r\n repo_list = get_repos(github_id)\r\n\r\n for repo in repo_list:\r\n commits_count = get_commits(github_id, repo)\r\n print('Repo: {} Number of commits: {}'.format(repo, commits_count))", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def ls(tfr: TFR, pattern, details):\n tfr.hub.fetch_remotes()\n for repo in tfr.hub.ls(pattern):\n echo(repo.name)\n if details:\n echo(repo.html_url)\n echo(repo.ssh_url)", "def test_list_repos():\n repos = common_funcs.list_repos()\n\n assert isinstance(repos, list)", "def index(request):\n copy = '2018 ' + author\n\n context = dict(author=author, copyright=copy, repo_url=repo_url)\n\n return render(request, 'index.html', context)", "def get_repos(github_id):\r\n\r\n url = 'https://api.github.com/users/{}/repos'.format(github_id)\r\n response = requests.get(url)\r\n todos = json.loads(response.text)\r\n\r\n repo_list = []\r\n \r\n for data in todos:\r\n repo_list.append(data['name'])\r\n\r\n return repo_list", "def test_index_repositories(self):\n self.Mokes.add_repo_to_pi()\n with self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n index = self.client.get(\"/\").data.decode()\n self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"GreekLit link should be there\")\n self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"LatinLit link should be there\")", "def get_repos(self):\n\t\tsession = self.login()\n\t\titems = session.query(Repos)\n\t\tresponse = [row2dict(item) for item in 
items]\n\n\t\tself.logout(session)\n\t\treturn response", "def fetch_repos(connection):\n\n try:\n response = connection.get_json('repository')\n except HTTPRequestError as ex:\n raise exception_from_http_error(ex) from ex\n\n result = response.get('result', [])\n return [Repository(connection, repo['rid'], data=repo) for repo in result]", "async def github_repo_info(self, ctx: commands.Context, *repo: str) -> None:\n repo = \"/\".join(repo)\n if repo.count(\"/\") != 1:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The repository should look like `user/reponame` or `user reponame`.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n async with ctx.typing():\n repo_data = await self.fetch_data(f\"{GITHUB_API_URL}/repos/{quote(repo)}\")\n\n # There won't be a message key if this repo exists\n if \"message\" in repo_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The requested repository was not found.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n embed = discord.Embed(\n title=repo_data[\"name\"],\n description=repo_data[\"description\"],\n colour=discord.Colour.blurple(),\n url=repo_data[\"html_url\"]\n )\n\n # If it's a fork, then it will have a parent key\n try:\n parent = repo_data[\"parent\"]\n embed.description += f\"\\n\\nForked from [{parent['full_name']}]({parent['html_url']})\"\n except KeyError:\n log.debug(\"Repository is not a fork.\")\n\n repo_owner = repo_data[\"owner\"]\n\n embed.set_author(\n name=repo_owner[\"login\"],\n url=repo_owner[\"html_url\"],\n icon_url=repo_owner[\"avatar_url\"]\n )\n\n repo_created_at = datetime.strptime(repo_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y\")\n last_pushed = datetime.strptime(repo_data[\"pushed_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y at %H:%M\")\n\n embed.set_footer(\n text=(\n f\"{repo_data['forks_count']} ⑂ \"\n f\"• {repo_data['stargazers_count']} ⭐ \"\n f\"• Created At {repo_created_at} \"\n f\"• Last Commit {last_pushed}\"\n )\n )\n\n await ctx.send(embed=embed)", "def get_project_page(self, name=None):\n project = self.get_project(name)\n url = project.http_url_to_repo\n if url.endswith('.git'):\n url = url[:-4]\n return url", "def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response", "def read(self, *args, **kwargs):\r\n\r\n if not self.current_repo:\r\n # get the first available repository\r\n self.current_repo = next(self.repo_list)\r\n\r\n if self.current_repo in self.repo_done:\r\n try:\r\n # get the next available repository\r\n self.current_repo = next(self.repo_list)\r\n # call self to get the next iteration \r\n self.read() \r\n except StopIteration:\r\n raise(\"repository exhausted\")\r\n\r\n else:\r\n # iterate to get all data until (GITHUB_SUPPORTED_RESOURCES is exhausted)\r\n resource = self.build_resource(page=self.page, per_page=self.per_page)\r\n if resource: \r\n if self.current_result.get(self.current_repo, None):\r\n self.current_result['owner'] = self.owner\r\n self.current_result['repo'] = self.current_repo\r\n self.current_result['resource'] = resource \r\n else: \r\n self.current_result['resource'] = 
resource \r\n self.current_result['page'] = self.page\r\n self.current_result['per_page'] = self.per_page \r\n \r\n # increment pagination\r\n self.page += settings.DEFAULT_PAGE\r\n self.per_page += settings.DEFAULT_PER_PAGE\r\n else:\r\n self.repo_done.append(self.current_repo)\r\n # reset pagination\r\n self.page = settings.DEFAULT_PAGE\r\n self.per_page = settings.DEFAULT_PER_PAGE\r\n \r\n return self.current_result", "async def list(app: AppIdentity, repo: str, ref: str):\n repo = RepoName.parse(repo)\n\n async with aiohttp.ClientSession(\n headers=await app.installation_headers(repo.owner)) as sesh:\n fetch = checks.GetRuns(owner=repo.owner, repo=repo.repo, ref=ref)\n print(await fetch.execute(sesh))", "def list_all_repos_info():\n repos = ALL_REPOS\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(repo_name)\n try:\n nbr_ahead, nbr_behind = _nbr_commits_ahead_and_behind(repo)\n except git.exc.GitCommandError:\n print(f\" {repo.active_branch.name}\")\n except DetachedHeadError:\n print(f\" HEAD --> {repo.head.commit}\")\n else:\n nb_tabul = 3 if len(repo.active_branch.name) < 6 else 2\n tabuls = \"\\t\" * nb_tabul\n print(f\" {repo.active_branch.name}{tabuls}↓ {nbr_behind} ↑ {nbr_ahead}\")\n if repo.index.diff(None):\n print(\" !!! With unstaged changes !!!\")\n if repo.index.diff(\"HEAD\"):\n print(\" !!! With uncommited changes !!!\")", "def repos(self):\r\n return repositories.Repos(self)", "def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def repos(self):\r\n return repos.Repos(self)", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def repo(self, user, repo):\r\n return repos.Repo(self, user, repo)", "def list(state='open'):\n reviews = parse(gh_request('GET', '/repos/:user/:repo/pulls'))\n printers.print_review_list(reviews)", "def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def list_articles():\n wiki = listdir(\"wiki\")\n return template(\"index\", wiki = wiki)", "async def repository(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"repository\"], *args, **kwargs)", "def process_repo(self, url, multiple=False):\n json_data = loads(self.get_from_net(url)) #TODO add code to detect error messages in JSON from API\n if not multiple: json_data = [json_data]\n repo_dets = []\n for i in json_data:\n dets = {\n 'full_name': i['full_name'],\n 'name': i['name'],\n 'fork': i['fork'],\n 'url': i['url'],\n 'language': '',\n 'created': '',\n 'id': i[\"id\"] #for use in pagination\n }\n if 'language' in i: dets['language'] = i['language']\n if 'created_at' in i: dets['created'] = i['created_at']\n repo_dets.append(dets)\n return repo_dets", "def repos(self, page=None, per_page=None):\r\n return UserRepos(self)", "def __init__(self, *args, **kwargs):\r\n \r\n self.current_result = dict()\r\n\r\n self.owner = kwargs['owner']\r\n self.resources = kwargs.get('resources', \r\n 
settings.GITHUB_SETTINGS['GITHUB_SUPPORTED_RESOURCES']\r\n )\r\n\r\n self.page = kwargs.get('page', settings.DEFAULT_PAGE)\r\n self.per_page = kwargs.get('per_page', settings.DEFAULT_PER_PAGE) \r\n \r\n self.repo_list = self._get_repo_list(**kwargs)\r\n\r\n self.repo_done = []\r\n self.current_repo = None", "def contents(self):\r\n return repocontents.RepoContents(self)", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def get_info(repos):\n info = \"labelord application is master-to-master application for label replication using webhook for GitHub<br>\"\n for i in repos:\n info += i + ' ' + repo_link(i) + '<br>'\n return info", "def get_repositories(self):\n \n endpoint = 'repositories'\n parameters = [('pagelen', '100')]\n \n if len(self.organization):\n endpoint += f'/{self.organization}' \n parameters.append(('role', 'contributor')) \n else: \n parameters.append(('role', 'owner'))\n \n repositories_raw_data = self.__request_api(f'{self.base_url}{endpoint}?{urllib.parse.urlencode(parameters)}', method='GET')\n repositories = []\n has_next_page = True\n \n while has_next_page:\n for datum in repositories_raw_data['values']:\n clone_url = None\n for link in datum['links']['clone']:\n if link['name'] == 'ssh':\n clone_url = link['href']\n break\n \n project_name = None\n if \"name\" in datum['project']:\n project_name = datum['project']['name']\n \n repositories.append(VcsRepository(datum['slug'], datum['description'], clone_url, datum['is_private'], project_name))\n \n has_next_page = \"next\" in repositories_raw_data\n \n if has_next_page: \n repositories_raw_data = self.__request_api(repositories_raw_data[\"next\"], method='GET')\n\n return repositories", "def fetch_repo(data):\n repo = Repository.objects.get(**data)\n\n # create a temporary directory\n tmp_dir = util.tmp_dir('github')\n\n # log\n log.info(\"Fetching repo %s to %s\", repo.full_name, tmp_dir)\n\n # clone the repository to the directory\n git.Repo.clone_from(repo.git_url, tmp_dir)\n\n # add the repo path to the database\n repo.local_path = tmp_dir\n repo.save()\n\n # tell workers the repo is available\n publish('github.repo_available', data)", "def list_repos(self):\n return sorted(self.user_con.list_repos())", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def repo(self, user, repo):\r\n return repositories.Repo(self, user, repo)", "def read_pages(self):\n for file in os.listdir(self.repo_path):\n if file.endswith('.md'):\n if str(file) is not ('README.md' or '404.md'):\n with open(self.repo_path + file, 'r') as page_file:\n file_data = page_file.read()\n content_dict = self.page_call_scrapers(file_data)\n content_dict['repo'] = RepoDbIO().get_repo(\n 
self.user, self.repo_name)\n PageDbIO().save_db_instance(content_dict)", "def get(self, args):\n\t\tif len(args) >= 2:\n\t\t\tif args[1] == \"list\":\n\t\t\t\tself.write_line(\"LIST {0}\".format(self.config[\"daemon\"][\"rootdir\"] + \"/package-index.json\"))", "def _search_print_lines(self, repo_list, lines, fmt):\n for repo in repo_list[\"results\"]:\n if \"is_official\" in repo and repo[\"is_official\"]:\n is_official = \"[OK]\"\n else:\n is_official = \"----\"\n description = \"\"\n for dfield in (\"description\", \"short_description\"):\n if dfield in repo and repo[dfield] is not None:\n for char in repo[dfield]:\n if char == '\\n':\n break\n if char in string.printable:\n description += char\n break\n name = \"\"\n for nfield in (\"name\", \"repo_name\"):\n if nfield in repo and repo[nfield] is not None:\n name = repo[nfield]\n break\n stars = \"\"\n if \"star_count\" in repo and repo[\"star_count\"] is not None:\n stars = str(repo[\"star_count\"])\n Msg().out(fmt % (name, is_official, description, stars))\n lines -= 1\n if not lines:\n break", "def get_catalog(self):\n\n rep = req.get_json(self.CATALOG)\n repo_list = rep[\"repositories\"]\n\n for repo in repo_list:\n self.list.append(Repository(repo))\n\n return self.list", "def repository(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository\", host, owner, repo)", "def get(self, repo: Repository):\n cache_key = self.cache_key.format(repo_id=repo.id.hex)\n\n result = redis.get(cache_key)\n if result is None:\n vcs = repo.get_vcs()\n if not vcs:\n return self.respond([])\n\n vcs.ensure()\n result = vcs.get_known_branches()\n redis.setex(cache_key, json.dumps(result), self.cache_expire)\n else:\n result = json.loads(result)\n\n return self.respond([{\"name\": r} for r in result])", "def get(self, git_repo_id: int):\n document: GitRepositoryModel = self.datastore.read(document_id=git_repo_id)\n if not document:\n return None, 404\n\n return document, 200", "def _get_org_repos(self):\n url = f\"{BASE_URL}/orgs/{ORG}/repos\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def api_repo_get(access_key):\n repo = Repo.query.get(access_key)\n if not repo:\n return jsonify(error=\"Repo not found\"), 404\n \n if repo.is_private and 'working_repo' not in session:\n return jsonify(error=\"Unauthorized\"), 401\n elif repo.is_private and session['working_repo'] != repo.access_key:\n return jsonify(error=\"Unauthorized\"), 403\n elif repo.is_private and session['working_repo'] == repo.access_key:\n return jsonify(repo.to_json())\n else:\n return jsonify(repo.to_json())", "def get_repos(org_list):\n jsonRepos = []\n for org in org_list:\n print(\"\\nScraping repositories of\", org)\n jsonRepo = load_json(\"https://api.github.com/orgs/\" + org +\n \"/repos?per_page=100\")\n for repo in jsonRepo:\n # Add field for org to make CSV file more useful\n repo['organization'] = org\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonRepos.append(repo)\n # Create a list with the items I'm interested in, then call generate_csv\n columns_list = [\n 'organization',\n 'name',\n 'full_name',\n 'stargazers_count',\n 'language',\n 'created_at',\n 'updated_at',\n 'homepage',\n 'fork',\n 'description'\n ]\n generate_csv(\"repo-list\", jsonRepos, columns_list)", "def generate_project_list_fp(config, fp):\r\n log = logging.getLogger('gitosis.gitweb.generate_projects_list')\r\n\r\n repositories = util.getRepositoryDir(config)\r\n\r\n 
try:\r\n global_enable = config.getboolean('gitosis', 'gitweb')\r\n except (NoSectionError, NoOptionError):\r\n global_enable = False\r\n\r\n for section in config.sections():\r\n l = section.split(None, 1)\r\n type_ = l.pop(0)\r\n if type_ != 'repo':\r\n continue\r\n if not l:\r\n continue\r\n\r\n try:\r\n enable = config.getboolean(section, 'gitweb')\r\n except (NoSectionError, NoOptionError):\r\n enable = global_enable\r\n\r\n if not enable:\r\n continue\r\n\r\n name, = l\r\n\r\n if not os.path.exists(os.path.join(repositories, name)):\r\n namedotgit = '%s.git' % name\r\n if os.path.exists(os.path.join(repositories, namedotgit)):\r\n name = namedotgit\r\n else:\r\n log.warning(\r\n 'Cannot find %(name)r in %(repositories)r'\r\n % dict(name=name, repositories=repositories))\r\n\r\n response = [name]\r\n try:\r\n owner = config.get(section, 'owner')\r\n except (NoSectionError, NoOptionError):\r\n pass\r\n else:\r\n response.append(owner)\r\n\r\n line = ' '.join([urllib.quote_plus(s) for s in response])\r\n print >>fp, line", "def __get_repo_url_by_name(self, name, repos_list):\n for repo in repos_list:\n if repo['name'] == name:\n return repo['commits_url'].split('{')[0]", "def list(request):\n files = PoFile.objects.all()\n return render_to_response('poeditor/list.html', {\n 'files' : files,\n }, context_instance=RequestContext(request))", "def query_repository(repo_name):\n return buildapi.query_repository(repo_name)", "def _list_repos(is_json):\n\n package_manager = _get_package_manager()\n repos = package_manager.get_repos()\n\n if is_json:\n return emitter.publish(repos)\n elif repos.get(\"repositories\"):\n repos = [\"{}: {}\".format(repo.get(\"name\"), repo.get(\"uri\"))\n for repo in repos.get(\"repositories\")]\n emitter.publish(\"\\n\".join(repos))\n else:\n msg = (\"There are currently no repos configured. 
\"\n \"Please use `dcos package repo add` to add a repo\")\n raise DCOSException(msg)\n\n return 0", "def list_repositories(self):\n data = self._get_all_data('/user/repos')\n return [repo['full_name'] for repo in data]", "def list():\n index = config.index\n output_format = \"%-7s %-20s %s\"\n click.secho(output_format % (\"ID\", \"CREATED\", \"BACKENDS\"), fg=\"cyan\")\n for archive in sorted(index.archives(), key=lambda x: x[\"id\"]):\n # Print it out\n click.echo(\n output_format\n % (\n archive[\"id\"],\n datetime.datetime.fromtimestamp(archive[\"created\"]).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \", \".join(sorted(archive[\"backend_names\"])),\n )\n )", "def run(organization, top_n, username, pat):\n print()\n try:\n raw_repos = get_repos(organization, username=username, pat=pat)\n except Exception as ex:\n click.echo('Error collecting repos')\n sys.exit(1)\n\n repos = []\n\n with Halo(text='Retrieving repos...', spinner='dots'):\n for raw_repo in raw_repos:\n repos.append(Repo(raw_repo))\n\n if len(repos) == 0:\n print('No public repos were found')\n sys.exit(0)\n\n with Halo(text='Retrieving pull requests...', spinner='dots'):\n try:\n with ThreadPoolExecutor(max_workers=5) as executor:\n future_to_repo = {executor.submit(get_prs, repo.pr_url, username, pat): repo for repo in repos}\n for future in as_completed(future_to_repo):\n repo = future_to_repo[future]\n\n repo.pr_count = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (repo.name, exc))\n sys.exit(1)\n\n top_star = sorted(repos, key=lambda repo: repo.stars, reverse=True)[:top_n]\n top_fork = sorted(repos, key=lambda repo: repo.forks, reverse=True)[:top_n]\n top_prs = sorted(repos, key=lambda repo: repo.pr_count, reverse=True)[:top_n]\n top_contrib = sorted(repos, key=lambda repo: repo.contrib, reverse=True)[:top_n]\n\n print_stars(top_star, top_n)\n print_forks(top_fork, top_n)\n print_prs(top_prs, top_n)\n print_contrib(top_contrib, top_n)", "def _recursive_gh_get(href, items, password=None):\n response = GitHub._request('GET', href, token=password)\n response.raise_for_status()\n items.extend(response.json())\n if \"link\" not in response.headers:\n return\n # links = link_header.parse(response.headers[\"link\"])\n # rels = {link.rel: link.href for link in links.links}\n # if \"next\" in rels:\n # ghRelease._recursive_gh_get(rels[\"next\"], items)", "async def pr(ctx, number: Option(int, \"Pull request number\")):\n url = f\"{repo}/issues/{number}\"\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"View Pull Request\", url=url))\n await ctx.respond(f\"Here's a link\", view=view)", "def cli(payload_url, exclude_inactive):\n repos = get_repos_with_webhook(\n payload_url, exclude_inactive=exclude_inactive\n )\n\n for repo in repos:\n click.echo(repo_name(repo))", "def repo_init(_request):\n python = models.Repository.query(models.Repository.name == 'Python').get()\n if python is None:\n python = models.Repository(name='Python', url=SVN_ROOT)\n python.put()\n pybranches = []\n else:\n pybranches = list(models.Branch.query(models.Branch.repo_key == python.key))\n for category, name, url in BRANCHES:\n url = python.url + url\n for br in pybranches:\n if (br.category, br.name, br.url) == (category, name, url):\n break\n else:\n br = models.Branch(repo_key=python.key, repo_name='Python',\n category=category, name=name, url=url)\n br.put()\n return HttpResponseRedirect(reverse(repos))", "def show( self, trans, id, **kwd ):\n # Example URL: 
http://localhost:9009/api/repository_revisions/bb125606ff9ea620\n try:\n repository_metadata = metadata_util.get_repository_metadata_by_id( trans, id )\n repository_metadata_dict = repository_metadata.as_dict( value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n return repository_metadata_dict\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in show: %s\" % str( e )\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message", "def do_search(cs, args):\n resp, data = cs.searcher.search(args.query)\n project_fields = ['id', 'name', 'public']\n print(\"Find %d Projects: \" % len(data['project']))\n utils.print_list(\n data['project'], project_fields, formatters={}, sortby='id')\n repository_fields = [\n 'repository_name', 'project_name', 'project_id', 'project_public'\n ]\n print(\"\\n\")\n print(\"Find %d Repositories: \" % len(data['repository']))\n utils.print_list(\n data['repository'],\n repository_fields,\n formatters={},\n sortby='repository_name')", "def dashboard():\n\n # get the directories in the data folder\n # (each directory represents another repo)\n repos = os.listdir(DATA)\n\n for repo in repos:\n # remove it if it's not a directory\n if not os.path.isdir(DATA + repo):\n repos.remove(repo)\n\n return render_template('home/dashboard.html', title=\"Dashboard\", repos=repos)", "def _search_repository(self, query):\n \n # raise repository search dialog\n dlg = RepositoryView(self, self._library, query=query)\n response = dlg.ShowModal()\n articles = dlg.GetArticles()\n dlg.Destroy()\n \n # check response\n if response != wx.ID_OK or not articles:\n return\n \n # insert articles\n for article in articles:\n if article.checked:\n self._library.insert(article)\n \n # refresh collections view\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()", "def cli(ctx, repo_home):\n # Create a repo object and remember it as as the context object.\n ctx.obj = Repo(os.path.abspath(repo_home))", "def list_articles():\n\n return template(\"index\", articles=get_articles())", "def get_own_repo():\n own_repo = GitClass(name='self', url='https://github.com/meganhmoore/github-api-covid-data', owner='meganhmoore',\n repo='github-api-covid-data', branch='develop/new_data')\n return own_repo", "def get_members_repos(org_list):\n print(\"\\nGetting repositories of all members.\")\n jsonMembersRepo_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'fork',\n 'stargazers_count',\n 'forks_count',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting repositories of', member['login'])\n jsonMembersRepos = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/repos?per_page=100\")\n for repo in jsonMembersRepos:\n # Add fields to make CSV file more usable\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersRepo_list.append(repo)\n generate_csv(\"members-list\", jsonMembersRepo_list, columns_list)", "def fetch_repos(self):\n logging.info(\"Fetching repositories in: %s\" % 
self.name)\n list_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"list\",\n \"-n\",\n self.name,\n \"-o\",\n \"tsv\",\n ]\n\n result = run_cmd(list_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n logging.info(\"Successfully fetched repositories from: %s\" % self.name)\n repos = result[\"output\"].split(\"\\n\")[:-1]\n logging.info(\"Total number of repositories: %d\" % len(repos))\n\n return repos", "def update_repos():\n with open(repolist_file, \"r\") as repofile:\n repolist = repofile.readlines()\n for idx in xrange(len(repolist)):\n l = repolist[idx].strip()\n if re.match('^[i]',l):\n repodir = clone_dir + \"/\" + os.path.basename(l)\n git(\"fetch\", \"--all\", cwd = repodir)\n pass", "async def getList(author, page):\n availableCommands = await _generateList(author, False)\n availableCommands.sort(key=lambda x: x['name'])\n totalPages = math.floor(len(availableCommands)/10) + 1\n if page == 100:\n page = totalPages\n if page > totalPages or page < 1:\n return False\n availableCommands = availableCommands[(page-1)*10:(page)*10]\n return assembleEmbed(\n title=f\"List of Commands for `{author}` (Page {page}/{totalPages})\",\n desc=\"\\n\".join([f\"`{c['name']}` - {c['description']}\" for c in availableCommands])\n )", "def repo_new(request):\n if request.method != 'POST':\n form = RepoForm()\n return respond(request, 'repo_new.html', {'form': form})\n form = RepoForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n repo = models.Repository(\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n guid=form.cleaned_data.get('guid'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'repo_new.html', {'form': form})\n repo.put()\n branch_url = repo.url\n if not branch_url.endswith('/'):\n branch_url += '/'\n branch_url += 'trunk/'\n branch = models.Branch(repo_key=repo.key, repo_name=repo.name,\n category='*trunk*', name='Trunk',\n url=branch_url)\n branch.put()\n return HttpResponseRedirect(reverse(repos))" ]
[ "0.7262444", "0.7217461", "0.7216748", "0.71565646", "0.6850128", "0.6780763", "0.67550576", "0.6684879", "0.6644136", "0.64276356", "0.6412211", "0.6325303", "0.6321167", "0.6286405", "0.62793666", "0.6230717", "0.61821365", "0.6155869", "0.6142341", "0.6085627", "0.60169744", "0.6015788", "0.5996967", "0.59948725", "0.5971621", "0.5969221", "0.5951896", "0.5947835", "0.5920891", "0.5909223", "0.5902664", "0.5863018", "0.5856802", "0.5847493", "0.58342636", "0.5821488", "0.58199334", "0.58072525", "0.57999", "0.5761307", "0.5756483", "0.5752802", "0.57344985", "0.5721271", "0.5719528", "0.57107776", "0.57106006", "0.5710554", "0.5707303", "0.56964046", "0.56938946", "0.56916326", "0.567406", "0.5660927", "0.5632714", "0.56295514", "0.5629499", "0.562686", "0.56243306", "0.5615936", "0.5603233", "0.5599058", "0.55881333", "0.5585866", "0.5545383", "0.55284387", "0.55108154", "0.5499992", "0.54992723", "0.54964465", "0.54925454", "0.5486797", "0.5485053", "0.5476741", "0.54737294", "0.5466894", "0.54563683", "0.54505384", "0.54468006", "0.5436248", "0.541047", "0.54022914", "0.5393388", "0.539312", "0.53874546", "0.5384149", "0.538377", "0.5379192", "0.537811", "0.537023", "0.5365812", "0.53632265", "0.5352182", "0.53467506", "0.53460276", "0.5345337", "0.53439367", "0.53439134", "0.5339845", "0.5335632" ]
0.63984853
11
page list one repo
def get_pkg(pkg_name):
    pkg = Database().db.get_pkg_details(pkg_name, "", False)
    pkg = PackageDetail.surClass(pkg)
    print('dir: ', dir(pkg))
    #print('name:', pkg.nane)
    #print('props.name:', pkg.props.nane)
    return render_template("pkg.html",
                           title=" - "+pkg_name,
                           repos=Database().db.get_repos_names(),
                           pkg=pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_repos_cli(api_client, path_prefix, next_page_token):\n content = ReposApi(api_client).list(path_prefix, next_page_token)\n click.echo(pretty_format(content))", "def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)", "def repos(request):\n # Clean up garbage created by buggy edits\n bad_branch_keys = models.Branch.query(models.Branch.owner == None).fetch(\n 100, keys_only=True)\n if bad_branch_keys:\n ndb.delete_multi(bad_branch_keys)\n repo_map = {}\n for repo in models.Repository.query().fetch(1000, batch_size=100):\n repo_map[repo.key] = repo\n branches = []\n for branch in models.Branch.query().fetch(2000, batch_size=100):\n repo_key = branch.repo_key\n if repo_key in repo_map:\n branch.repository = repo_map[repo_key]\n branches.append(branch)\n branches.sort(key=lambda b: map(\n unicode.lower, (b.repository.name, b.category, b.name)))\n return respond(request, 'repos.html', {'branches': branches})", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def info(id):\n sql = \"select distinct name, description, stars, url, last_push_date, repo_id, created_date, avatar from python_repos where repo_id=\"+id\n db = get_db()\n cursor = db.execute(sql)\n repo_info = cursor.fetchall()\n return render_template('repo.html',info=repo_info)", "def show_entries():\n db = get_db()\n cur = db.execute('select distinct name,repo_id,stars, description from python_repos order by stars desc')\n entries = cur.fetchall()\n # get api\n results = get_api()\n # The update operation will consist of deletion and insertion for efficiency\n delete_entry(results)\n add_entry(results)\n return render_template('index.html', entries=entries)", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )", "def repo_info(self, attempt=1):\n\n response = self.postman.request('repo_list', page=attempt)\n\n if (response.status_code == requests.codes.ok):\n if (len(response.json()) != 0):\n for repo in response.json():\n self.repo_list.append(repo['name'])\n\n self.repo_info(attempt=attempt + 1)", "def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def get_repo(repo_id):\n if repo_id == \"orphans\":\n pkgs = Database().db.get_orphans()\n else:\n pkgs = Database().db.get_repo_pkgs(repo_id)\n return render_template(\"repo.html\", \n title=\" - \"+repo_id,\n repos=Database().db.get_repos_names(),\n pkgs=pkgs,\n repo=repo_id)", "def repository_show(ctx: click.Context, repository_name):\n subcommand_repository.cmd_show(ctx.obj, repository_name)", "def repos():\n print(\"\\nThe following repos 
are available.\\n\")\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"INDEX_SHELF\")))\n\n print(\"{:<4} {:<20} {:<}\".format(\"Key\", \"| Name\", \"| Path\"))\n print(\"******************************************\")\n for key in INDEX_SHELF.keys():\n name = INDEX_SHELF[key]\n print(\"{:<4} {:<20} {:<}\".format(key, name, str(NAME_SHELF[name])))\n INDEX_SHELF.close()\n NAME_SHELF.close()", "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "def do_list(cs, args):\n data = []\n _, repositories = cs.repositories.list(args.project_id)\n for repo in repositories:\n _, tags = cs.repositories.list_tags(repo)\n for tag in tags:\n _, manifests = cs.repositories.get_manifests(repo, tag)\n manifests['Name'] = repo\n manifests['Tag'] = tag\n manifests['Project'] = args.project_id\n manifests['Id'] = manifests['Id'][0:12]\n data.append(manifests)\n fields = [\n \"Id\", \"Name\", \"Tag\", \"Author\", 'Project', \"Created\", \"Docker Version\",\n \"Architecture\", \"OS\"\n ]\n utils.print_list(data, fields, sortby=args.sortby)", "def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url", "def do_show(cs, args):\n repo = args.repository\n tag_index = repo.find(':')\n if tag_index != -1:\n tag = repo[tag_index + 1:]\n repo = repo[:tag_index]\n else:\n tag = \"latest\"\n if repo.find('/') == -1:\n repo = \"library/\" + repo\n _, data = cs.repositories.get_manifests(repo, tag)\n utils.print_dict(data)", "def test_dors_repository_index(self):\n USER_NAME = \"doryalo\"\n self.driver.get(\"https://github.com/\")\n self.main_page = page.MainPage(self.driver)\n self.main_page.commit_search(USER_NAME)\n self.search_result_page = page.SearchResultsPage(self.driver)\n self.search_result_page.filter_results_by_user_name()\n self.search_result_page.navigate_to_user_page(USER_NAME)\n self.user_page = page.UserProfilePage(self.driver)\n self.analyze_all_repositories(repositories_list=self.user_page.get_all_repositories_elements())", "def main():\n# logging.basicConfig(level=logging.DEBUG)\n try:\n user = sys.ARGV[1]\n except:\n user = 'hmm01i'\n repos = getRepos(user)\n print(\"%i Personal Repos\" % len(repos))\n logging.debug(repos)\n #print(\"Repo,[size, last update]\")\n #for k in repos.keys():\n # print(str(k),repos[k])", "def list_prs(service, repo):\n a = App()\n if repo:\n s = a.get_service(service, repo=repo)\n else:\n s = a.guess_service()\n prs = s.list_pull_requests()\n if not prs:\n print(\"No open pull requests.\")\n return\n print(tabulate([\n (\n \"#%s\" % pr['id'],\n pr['title'],\n \"@%s\" % pr['author'],\n pr['url']\n )\n for pr in prs\n ], tablefmt=\"fancy_grid\"))", "def _git_show(self, path, ref=\"HEAD\"):\n res = requests.get(\n \"/\".join([self.loc, ref, path]),\n auth=HTTPBasicAuth(self.username, self.password)\n )\n\n if res.status_code // 100 != 2:\n return None\n\n if res.headers['Content-Type'] == 'application/json':\n res = json.loads(res.content)\n # cache existence info about all directories shown!\n if path != \"talus/pypi/simple\" and res[\"type\"] == \"listing\":\n self._add_to_cache(path, items=res[\"items\"])\n else:\n res = 
res.content\n\n return res", "def get_details(self, repo=None):\n api_json = []\n\n #get all branches from this repo\n branches = self.make_branches(self.getBranch(repo))\n\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(2)\n\n for branch in branches:\n args = {\"per_page\": \"100\",\n \"sha\": branch,\n \"author\": self.username,\n \"since\": yesterday.isoformat()}\n args = self.make_args(args)\n repo_url = \"/\".join([self.url, \"repos\", repo, \"commits\"])\n repo_url = repo_url + args\n\n request = urllib2.Request(repo_url, headers=self.headers)\n response = urllib2.urlopen(request)\n raw_data = response.read()\n commits_info = self.process_factory(simplejson.loads(raw_data))\n api_json = api_json + commits_info\n\n print repo_url\n\n print api_json\n return api_json", "def get_repo_options(account, **kwargs):\n client = AsyncHTTPClient()\n uri = \"https://api.github.com/user/repos?per_page=100\"\n data = []\n while uri is not None:\n req = account.get_request(uri, headers={\"Accept\": \"application/vnd.github.moondragon+json\"})\n response = yield client.fetch(req)\n response_object = json.loads(response.body.decode('utf-8'))\n data += response_object\n links = parse_link_header(response.headers.get('Link', ''))\n uri = links.get('next', None)\n return [{\"title\": repo['full_name'], \"value\": repo['full_name']}\n for repo in data]", "def repo_info():\n return TEST_REPOS_INFO[0]", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def list_repo_cards(self, repo):\n # check for permission\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'read')\n\n # get the relevant cards\n cards = Card.objects.all().filter(\n repo_base=self.repo_base, repo_name=repo)\n cards = sorted([c.card_name for c in cards])\n return cards", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def display_repos_and_commits(github_id):\r\n\r\n repo_list = get_repos(github_id)\r\n\r\n for repo in repo_list:\r\n commits_count = get_commits(github_id, repo)\r\n print('Repo: {} Number of commits: {}'.format(repo, commits_count))", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def ls(tfr: TFR, pattern, details):\n tfr.hub.fetch_remotes()\n for repo in tfr.hub.ls(pattern):\n echo(repo.name)\n if details:\n echo(repo.html_url)\n echo(repo.ssh_url)", "def test_list_repos():\n repos = common_funcs.list_repos()\n\n assert isinstance(repos, list)", "def index(request):\n copy = '2018 ' + author\n\n context = dict(author=author, copyright=copy, repo_url=repo_url)\n\n return render(request, 'index.html', context)", "def get_repos(github_id):\r\n\r\n url = 'https://api.github.com/users/{}/repos'.format(github_id)\r\n response = requests.get(url)\r\n todos = json.loads(response.text)\r\n\r\n repo_list = []\r\n \r\n for data in todos:\r\n repo_list.append(data['name'])\r\n\r\n return repo_list", "def test_index_repositories(self):\n self.Mokes.add_repo_to_pi()\n with self.logged_in(access_token=\"nbiousndegoijubdognlksdngndsgmngds\"):\n index = self.client.get(\"/\").data.decode()\n self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"GreekLit link should be there\")\n 
self.assertIn('href=\"/repo/PerseusDl/canonical-greekLit\"', index, \"LatinLit link should be there\")", "def get_repos(self):\n\t\tsession = self.login()\n\t\titems = session.query(Repos)\n\t\tresponse = [row2dict(item) for item in items]\n\n\t\tself.logout(session)\n\t\treturn response", "def fetch_repos(connection):\n\n try:\n response = connection.get_json('repository')\n except HTTPRequestError as ex:\n raise exception_from_http_error(ex) from ex\n\n result = response.get('result', [])\n return [Repository(connection, repo['rid'], data=repo) for repo in result]", "async def github_repo_info(self, ctx: commands.Context, *repo: str) -> None:\n repo = \"/\".join(repo)\n if repo.count(\"/\") != 1:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The repository should look like `user/reponame` or `user reponame`.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n async with ctx.typing():\n repo_data = await self.fetch_data(f\"{GITHUB_API_URL}/repos/{quote(repo)}\")\n\n # There won't be a message key if this repo exists\n if \"message\" in repo_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The requested repository was not found.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n embed = discord.Embed(\n title=repo_data[\"name\"],\n description=repo_data[\"description\"],\n colour=discord.Colour.blurple(),\n url=repo_data[\"html_url\"]\n )\n\n # If it's a fork, then it will have a parent key\n try:\n parent = repo_data[\"parent\"]\n embed.description += f\"\\n\\nForked from [{parent['full_name']}]({parent['html_url']})\"\n except KeyError:\n log.debug(\"Repository is not a fork.\")\n\n repo_owner = repo_data[\"owner\"]\n\n embed.set_author(\n name=repo_owner[\"login\"],\n url=repo_owner[\"html_url\"],\n icon_url=repo_owner[\"avatar_url\"]\n )\n\n repo_created_at = datetime.strptime(repo_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y\")\n last_pushed = datetime.strptime(repo_data[\"pushed_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y at %H:%M\")\n\n embed.set_footer(\n text=(\n f\"{repo_data['forks_count']} ⑂ \"\n f\"• {repo_data['stargazers_count']} ⭐ \"\n f\"• Created At {repo_created_at} \"\n f\"• Last Commit {last_pushed}\"\n )\n )\n\n await ctx.send(embed=embed)", "def get_project_page(self, name=None):\n project = self.get_project(name)\n url = project.http_url_to_repo\n if url.endswith('.git'):\n url = url[:-4]\n return url", "def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response", "def read(self, *args, **kwargs):\r\n\r\n if not self.current_repo:\r\n # get the first available repository\r\n self.current_repo = next(self.repo_list)\r\n\r\n if self.current_repo in self.repo_done:\r\n try:\r\n # get the next available repository\r\n self.current_repo = next(self.repo_list)\r\n # call self to get the next iteration \r\n self.read() \r\n except StopIteration:\r\n raise(\"repository exhausted\")\r\n\r\n else:\r\n # iterate to get all data until (GITHUB_SUPPORTED_RESOURCES is exhausted)\r\n resource = self.build_resource(page=self.page, per_page=self.per_page)\r\n if resource: \r\n if 
self.current_result.get(self.current_repo, None):\r\n self.current_result['owner'] = self.owner\r\n self.current_result['repo'] = self.current_repo\r\n self.current_result['resource'] = resource \r\n else: \r\n self.current_result['resource'] = resource \r\n self.current_result['page'] = self.page\r\n self.current_result['per_page'] = self.per_page \r\n \r\n # increment pagination\r\n self.page += settings.DEFAULT_PAGE\r\n self.per_page += settings.DEFAULT_PER_PAGE\r\n else:\r\n self.repo_done.append(self.current_repo)\r\n # reset pagination\r\n self.page = settings.DEFAULT_PAGE\r\n self.per_page = settings.DEFAULT_PER_PAGE\r\n \r\n return self.current_result", "async def list(app: AppIdentity, repo: str, ref: str):\n repo = RepoName.parse(repo)\n\n async with aiohttp.ClientSession(\n headers=await app.installation_headers(repo.owner)) as sesh:\n fetch = checks.GetRuns(owner=repo.owner, repo=repo.repo, ref=ref)\n print(await fetch.execute(sesh))", "def list_all_repos_info():\n repos = ALL_REPOS\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(repo_name)\n try:\n nbr_ahead, nbr_behind = _nbr_commits_ahead_and_behind(repo)\n except git.exc.GitCommandError:\n print(f\" {repo.active_branch.name}\")\n except DetachedHeadError:\n print(f\" HEAD --> {repo.head.commit}\")\n else:\n nb_tabul = 3 if len(repo.active_branch.name) < 6 else 2\n tabuls = \"\\t\" * nb_tabul\n print(f\" {repo.active_branch.name}{tabuls}↓ {nbr_behind} ↑ {nbr_ahead}\")\n if repo.index.diff(None):\n print(\" !!! With unstaged changes !!!\")\n if repo.index.diff(\"HEAD\"):\n print(\" !!! With uncommited changes !!!\")", "def repos(self):\r\n return repositories.Repos(self)", "def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def repos(self):\r\n return repos.Repos(self)", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def repo(self, user, repo):\r\n return repos.Repo(self, user, repo)", "def list(state='open'):\n reviews = parse(gh_request('GET', '/repos/:user/:repo/pulls'))\n printers.print_review_list(reviews)", "def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def list_articles():\n wiki = listdir(\"wiki\")\n return template(\"index\", wiki = wiki)", "async def repository(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"repository\"], *args, **kwargs)", "def process_repo(self, url, multiple=False):\n json_data = loads(self.get_from_net(url)) #TODO add code to detect error messages in JSON from API\n if not multiple: json_data = [json_data]\n repo_dets = []\n for i in json_data:\n dets = {\n 'full_name': i['full_name'],\n 'name': i['name'],\n 'fork': i['fork'],\n 'url': i['url'],\n 'language': '',\n 'created': '',\n 'id': i[\"id\"] #for use in pagination\n }\n if 'language' in i: dets['language'] = i['language']\n if 'created_at' in i: dets['created'] = i['created_at']\n repo_dets.append(dets)\n return repo_dets", "def 
repos(self, page=None, per_page=None):\r\n return UserRepos(self)", "def __init__(self, *args, **kwargs):\r\n \r\n self.current_result = dict()\r\n\r\n self.owner = kwargs['owner']\r\n self.resources = kwargs.get('resources', \r\n settings.GITHUB_SETTINGS['GITHUB_SUPPORTED_RESOURCES']\r\n )\r\n\r\n self.page = kwargs.get('page', settings.DEFAULT_PAGE)\r\n self.per_page = kwargs.get('per_page', settings.DEFAULT_PER_PAGE) \r\n \r\n self.repo_list = self._get_repo_list(**kwargs)\r\n\r\n self.repo_done = []\r\n self.current_repo = None", "def contents(self):\r\n return repocontents.RepoContents(self)", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def get_info(repos):\n info = \"labelord application is master-to-master application for label replication using webhook for GitHub<br>\"\n for i in repos:\n info += i + ' ' + repo_link(i) + '<br>'\n return info", "def get_repositories(self):\n \n endpoint = 'repositories'\n parameters = [('pagelen', '100')]\n \n if len(self.organization):\n endpoint += f'/{self.organization}' \n parameters.append(('role', 'contributor')) \n else: \n parameters.append(('role', 'owner'))\n \n repositories_raw_data = self.__request_api(f'{self.base_url}{endpoint}?{urllib.parse.urlencode(parameters)}', method='GET')\n repositories = []\n has_next_page = True\n \n while has_next_page:\n for datum in repositories_raw_data['values']:\n clone_url = None\n for link in datum['links']['clone']:\n if link['name'] == 'ssh':\n clone_url = link['href']\n break\n \n project_name = None\n if \"name\" in datum['project']:\n project_name = datum['project']['name']\n \n repositories.append(VcsRepository(datum['slug'], datum['description'], clone_url, datum['is_private'], project_name))\n \n has_next_page = \"next\" in repositories_raw_data\n \n if has_next_page: \n repositories_raw_data = self.__request_api(repositories_raw_data[\"next\"], method='GET')\n\n return repositories", "def fetch_repo(data):\n repo = Repository.objects.get(**data)\n\n # create a temporary directory\n tmp_dir = util.tmp_dir('github')\n\n # log\n log.info(\"Fetching repo %s to %s\", repo.full_name, tmp_dir)\n\n # clone the repository to the directory\n git.Repo.clone_from(repo.git_url, tmp_dir)\n\n # add the repo path to the database\n repo.local_path = tmp_dir\n repo.save()\n\n # tell workers the repo is available\n publish('github.repo_available', data)", "def list_repos(self):\n return sorted(self.user_con.list_repos())", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def repo(self, user, repo):\r\n return repositories.Repo(self, user, repo)", "def read_pages(self):\n for file in os.listdir(self.repo_path):\n if file.endswith('.md'):\n if 
str(file) is not ('README.md' or '404.md'):\n with open(self.repo_path + file, 'r') as page_file:\n file_data = page_file.read()\n content_dict = self.page_call_scrapers(file_data)\n content_dict['repo'] = RepoDbIO().get_repo(\n self.user, self.repo_name)\n PageDbIO().save_db_instance(content_dict)", "def get(self, args):\n\t\tif len(args) >= 2:\n\t\t\tif args[1] == \"list\":\n\t\t\t\tself.write_line(\"LIST {0}\".format(self.config[\"daemon\"][\"rootdir\"] + \"/package-index.json\"))", "def _search_print_lines(self, repo_list, lines, fmt):\n for repo in repo_list[\"results\"]:\n if \"is_official\" in repo and repo[\"is_official\"]:\n is_official = \"[OK]\"\n else:\n is_official = \"----\"\n description = \"\"\n for dfield in (\"description\", \"short_description\"):\n if dfield in repo and repo[dfield] is not None:\n for char in repo[dfield]:\n if char == '\\n':\n break\n if char in string.printable:\n description += char\n break\n name = \"\"\n for nfield in (\"name\", \"repo_name\"):\n if nfield in repo and repo[nfield] is not None:\n name = repo[nfield]\n break\n stars = \"\"\n if \"star_count\" in repo and repo[\"star_count\"] is not None:\n stars = str(repo[\"star_count\"])\n Msg().out(fmt % (name, is_official, description, stars))\n lines -= 1\n if not lines:\n break", "def get_catalog(self):\n\n rep = req.get_json(self.CATALOG)\n repo_list = rep[\"repositories\"]\n\n for repo in repo_list:\n self.list.append(Repository(repo))\n\n return self.list", "def repository(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository\", host, owner, repo)", "def get(self, repo: Repository):\n cache_key = self.cache_key.format(repo_id=repo.id.hex)\n\n result = redis.get(cache_key)\n if result is None:\n vcs = repo.get_vcs()\n if not vcs:\n return self.respond([])\n\n vcs.ensure()\n result = vcs.get_known_branches()\n redis.setex(cache_key, json.dumps(result), self.cache_expire)\n else:\n result = json.loads(result)\n\n return self.respond([{\"name\": r} for r in result])", "def get(self, git_repo_id: int):\n document: GitRepositoryModel = self.datastore.read(document_id=git_repo_id)\n if not document:\n return None, 404\n\n return document, 200", "def _get_org_repos(self):\n url = f\"{BASE_URL}/orgs/{ORG}/repos\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def api_repo_get(access_key):\n repo = Repo.query.get(access_key)\n if not repo:\n return jsonify(error=\"Repo not found\"), 404\n \n if repo.is_private and 'working_repo' not in session:\n return jsonify(error=\"Unauthorized\"), 401\n elif repo.is_private and session['working_repo'] != repo.access_key:\n return jsonify(error=\"Unauthorized\"), 403\n elif repo.is_private and session['working_repo'] == repo.access_key:\n return jsonify(repo.to_json())\n else:\n return jsonify(repo.to_json())", "def get_repos(org_list):\n jsonRepos = []\n for org in org_list:\n print(\"\\nScraping repositories of\", org)\n jsonRepo = load_json(\"https://api.github.com/orgs/\" + org +\n \"/repos?per_page=100\")\n for repo in jsonRepo:\n # Add field for org to make CSV file more useful\n repo['organization'] = org\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonRepos.append(repo)\n # Create a list with the items I'm interested in, then call generate_csv\n columns_list = [\n 'organization',\n 'name',\n 'full_name',\n 'stargazers_count',\n 'language',\n 'created_at',\n 'updated_at',\n 'homepage',\n 'fork',\n 'description'\n ]\n 
generate_csv(\"repo-list\", jsonRepos, columns_list)", "def generate_project_list_fp(config, fp):\r\n log = logging.getLogger('gitosis.gitweb.generate_projects_list')\r\n\r\n repositories = util.getRepositoryDir(config)\r\n\r\n try:\r\n global_enable = config.getboolean('gitosis', 'gitweb')\r\n except (NoSectionError, NoOptionError):\r\n global_enable = False\r\n\r\n for section in config.sections():\r\n l = section.split(None, 1)\r\n type_ = l.pop(0)\r\n if type_ != 'repo':\r\n continue\r\n if not l:\r\n continue\r\n\r\n try:\r\n enable = config.getboolean(section, 'gitweb')\r\n except (NoSectionError, NoOptionError):\r\n enable = global_enable\r\n\r\n if not enable:\r\n continue\r\n\r\n name, = l\r\n\r\n if not os.path.exists(os.path.join(repositories, name)):\r\n namedotgit = '%s.git' % name\r\n if os.path.exists(os.path.join(repositories, namedotgit)):\r\n name = namedotgit\r\n else:\r\n log.warning(\r\n 'Cannot find %(name)r in %(repositories)r'\r\n % dict(name=name, repositories=repositories))\r\n\r\n response = [name]\r\n try:\r\n owner = config.get(section, 'owner')\r\n except (NoSectionError, NoOptionError):\r\n pass\r\n else:\r\n response.append(owner)\r\n\r\n line = ' '.join([urllib.quote_plus(s) for s in response])\r\n print >>fp, line", "def __get_repo_url_by_name(self, name, repos_list):\n for repo in repos_list:\n if repo['name'] == name:\n return repo['commits_url'].split('{')[0]", "def list(request):\n files = PoFile.objects.all()\n return render_to_response('poeditor/list.html', {\n 'files' : files,\n }, context_instance=RequestContext(request))", "def query_repository(repo_name):\n return buildapi.query_repository(repo_name)", "def _list_repos(is_json):\n\n package_manager = _get_package_manager()\n repos = package_manager.get_repos()\n\n if is_json:\n return emitter.publish(repos)\n elif repos.get(\"repositories\"):\n repos = [\"{}: {}\".format(repo.get(\"name\"), repo.get(\"uri\"))\n for repo in repos.get(\"repositories\")]\n emitter.publish(\"\\n\".join(repos))\n else:\n msg = (\"There are currently no repos configured. 
\"\n \"Please use `dcos package repo add` to add a repo\")\n raise DCOSException(msg)\n\n return 0", "def list_repositories(self):\n data = self._get_all_data('/user/repos')\n return [repo['full_name'] for repo in data]", "def list():\n index = config.index\n output_format = \"%-7s %-20s %s\"\n click.secho(output_format % (\"ID\", \"CREATED\", \"BACKENDS\"), fg=\"cyan\")\n for archive in sorted(index.archives(), key=lambda x: x[\"id\"]):\n # Print it out\n click.echo(\n output_format\n % (\n archive[\"id\"],\n datetime.datetime.fromtimestamp(archive[\"created\"]).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \", \".join(sorted(archive[\"backend_names\"])),\n )\n )", "def run(organization, top_n, username, pat):\n print()\n try:\n raw_repos = get_repos(organization, username=username, pat=pat)\n except Exception as ex:\n click.echo('Error collecting repos')\n sys.exit(1)\n\n repos = []\n\n with Halo(text='Retrieving repos...', spinner='dots'):\n for raw_repo in raw_repos:\n repos.append(Repo(raw_repo))\n\n if len(repos) == 0:\n print('No public repos were found')\n sys.exit(0)\n\n with Halo(text='Retrieving pull requests...', spinner='dots'):\n try:\n with ThreadPoolExecutor(max_workers=5) as executor:\n future_to_repo = {executor.submit(get_prs, repo.pr_url, username, pat): repo for repo in repos}\n for future in as_completed(future_to_repo):\n repo = future_to_repo[future]\n\n repo.pr_count = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (repo.name, exc))\n sys.exit(1)\n\n top_star = sorted(repos, key=lambda repo: repo.stars, reverse=True)[:top_n]\n top_fork = sorted(repos, key=lambda repo: repo.forks, reverse=True)[:top_n]\n top_prs = sorted(repos, key=lambda repo: repo.pr_count, reverse=True)[:top_n]\n top_contrib = sorted(repos, key=lambda repo: repo.contrib, reverse=True)[:top_n]\n\n print_stars(top_star, top_n)\n print_forks(top_fork, top_n)\n print_prs(top_prs, top_n)\n print_contrib(top_contrib, top_n)", "def _recursive_gh_get(href, items, password=None):\n response = GitHub._request('GET', href, token=password)\n response.raise_for_status()\n items.extend(response.json())\n if \"link\" not in response.headers:\n return\n # links = link_header.parse(response.headers[\"link\"])\n # rels = {link.rel: link.href for link in links.links}\n # if \"next\" in rels:\n # ghRelease._recursive_gh_get(rels[\"next\"], items)", "async def pr(ctx, number: Option(int, \"Pull request number\")):\n url = f\"{repo}/issues/{number}\"\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"View Pull Request\", url=url))\n await ctx.respond(f\"Here's a link\", view=view)", "def cli(payload_url, exclude_inactive):\n repos = get_repos_with_webhook(\n payload_url, exclude_inactive=exclude_inactive\n )\n\n for repo in repos:\n click.echo(repo_name(repo))", "def repo_init(_request):\n python = models.Repository.query(models.Repository.name == 'Python').get()\n if python is None:\n python = models.Repository(name='Python', url=SVN_ROOT)\n python.put()\n pybranches = []\n else:\n pybranches = list(models.Branch.query(models.Branch.repo_key == python.key))\n for category, name, url in BRANCHES:\n url = python.url + url\n for br in pybranches:\n if (br.category, br.name, br.url) == (category, name, url):\n break\n else:\n br = models.Branch(repo_key=python.key, repo_name='Python',\n category=category, name=name, url=url)\n br.put()\n return HttpResponseRedirect(reverse(repos))", "def show( self, trans, id, **kwd ):\n # Example URL: 
http://localhost:9009/api/repository_revisions/bb125606ff9ea620\n try:\n repository_metadata = metadata_util.get_repository_metadata_by_id( trans, id )\n repository_metadata_dict = repository_metadata.as_dict( value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n return repository_metadata_dict\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in show: %s\" % str( e )\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message", "def do_search(cs, args):\n resp, data = cs.searcher.search(args.query)\n project_fields = ['id', 'name', 'public']\n print(\"Find %d Projects: \" % len(data['project']))\n utils.print_list(\n data['project'], project_fields, formatters={}, sortby='id')\n repository_fields = [\n 'repository_name', 'project_name', 'project_id', 'project_public'\n ]\n print(\"\\n\")\n print(\"Find %d Repositories: \" % len(data['repository']))\n utils.print_list(\n data['repository'],\n repository_fields,\n formatters={},\n sortby='repository_name')", "def dashboard():\n\n # get the directories in the data folder\n # (each directory represents another repo)\n repos = os.listdir(DATA)\n\n for repo in repos:\n # remove it if it's not a directory\n if not os.path.isdir(DATA + repo):\n repos.remove(repo)\n\n return render_template('home/dashboard.html', title=\"Dashboard\", repos=repos)", "def _search_repository(self, query):\n \n # raise repository search dialog\n dlg = RepositoryView(self, self._library, query=query)\n response = dlg.ShowModal()\n articles = dlg.GetArticles()\n dlg.Destroy()\n \n # check response\n if response != wx.ID_OK or not articles:\n return\n \n # insert articles\n for article in articles:\n if article.checked:\n self._library.insert(article)\n \n # refresh collections view\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()", "def cli(ctx, repo_home):\n # Create a repo object and remember it as as the context object.\n ctx.obj = Repo(os.path.abspath(repo_home))", "def list_articles():\n\n return template(\"index\", articles=get_articles())", "def get_own_repo():\n own_repo = GitClass(name='self', url='https://github.com/meganhmoore/github-api-covid-data', owner='meganhmoore',\n repo='github-api-covid-data', branch='develop/new_data')\n return own_repo", "def get_members_repos(org_list):\n print(\"\\nGetting repositories of all members.\")\n jsonMembersRepo_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'fork',\n 'stargazers_count',\n 'forks_count',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting repositories of', member['login'])\n jsonMembersRepos = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/repos?per_page=100\")\n for repo in jsonMembersRepos:\n # Add fields to make CSV file more usable\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersRepo_list.append(repo)\n generate_csv(\"members-list\", jsonMembersRepo_list, columns_list)", "def fetch_repos(self):\n logging.info(\"Fetching repositories in: %s\" % 
self.name)\n list_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"list\",\n \"-n\",\n self.name,\n \"-o\",\n \"tsv\",\n ]\n\n result = run_cmd(list_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n logging.info(\"Successfully fetched repositories from: %s\" % self.name)\n repos = result[\"output\"].split(\"\\n\")[:-1]\n logging.info(\"Total number of repositories: %d\" % len(repos))\n\n return repos", "def update_repos():\n with open(repolist_file, \"r\") as repofile:\n repolist = repofile.readlines()\n for idx in xrange(len(repolist)):\n l = repolist[idx].strip()\n if re.match('^[i]',l):\n repodir = clone_dir + \"/\" + os.path.basename(l)\n git(\"fetch\", \"--all\", cwd = repodir)\n pass", "async def getList(author, page):\n availableCommands = await _generateList(author, False)\n availableCommands.sort(key=lambda x: x['name'])\n totalPages = math.floor(len(availableCommands)/10) + 1\n if page == 100:\n page = totalPages\n if page > totalPages or page < 1:\n return False\n availableCommands = availableCommands[(page-1)*10:(page)*10]\n return assembleEmbed(\n title=f\"List of Commands for `{author}` (Page {page}/{totalPages})\",\n desc=\"\\n\".join([f\"`{c['name']}` - {c['description']}\" for c in availableCommands])\n )", "def repo_new(request):\n if request.method != 'POST':\n form = RepoForm()\n return respond(request, 'repo_new.html', {'form': form})\n form = RepoForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n repo = models.Repository(\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n guid=form.cleaned_data.get('guid'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'repo_new.html', {'form': form})\n repo.put()\n branch_url = repo.url\n if not branch_url.endswith('/'):\n branch_url += '/'\n branch_url += 'trunk/'\n branch = models.Branch(repo_key=repo.key, repo_name=repo.name,\n category='*trunk*', name='Trunk',\n url=branch_url)\n branch.put()\n return HttpResponseRedirect(reverse(repos))" ]
[ "0.7262444", "0.7217461", "0.7216748", "0.71565646", "0.6850128", "0.6780763", "0.67550576", "0.6684879", "0.6644136", "0.64276356", "0.6412211", "0.63984853", "0.6325303", "0.6321167", "0.6286405", "0.62793666", "0.6230717", "0.61821365", "0.6155869", "0.6142341", "0.6085627", "0.60169744", "0.6015788", "0.5996967", "0.59948725", "0.5971621", "0.5969221", "0.5951896", "0.5947835", "0.5920891", "0.5909223", "0.5902664", "0.5863018", "0.5856802", "0.5847493", "0.58342636", "0.5821488", "0.58199334", "0.58072525", "0.57999", "0.5761307", "0.5756483", "0.5752802", "0.57344985", "0.5721271", "0.5719528", "0.57107776", "0.57106006", "0.5710554", "0.5707303", "0.56964046", "0.56938946", "0.56916326", "0.567406", "0.5660927", "0.5632714", "0.56295514", "0.5629499", "0.562686", "0.56243306", "0.5615936", "0.5603233", "0.5599058", "0.55881333", "0.5585866", "0.5545383", "0.55284387", "0.55108154", "0.5499992", "0.54992723", "0.54964465", "0.54925454", "0.5486797", "0.5485053", "0.5476741", "0.54737294", "0.5466894", "0.54563683", "0.54505384", "0.54468006", "0.5436248", "0.541047", "0.54022914", "0.5393388", "0.539312", "0.53874546", "0.5384149", "0.538377", "0.5379192", "0.537811", "0.537023", "0.5365812", "0.53632265", "0.5352182", "0.53467506", "0.53460276", "0.5345337", "0.53439367", "0.53439134", "0.5339845", "0.5335632" ]
0.0
-1
Returns an iterator of documents being read from disk.
def get_documents(self, batch=None): files = None if not batch: # no batch = all the batches files = self._get_batch_files() elif batch == "random": # get all the batches and pick one from random batches = self._get_batches() files = [ self._get_batch_file(batch=random.randint(1, len(batches))) ] else: # get the specified batch files = [ self._get_batch_file(batch=batch) ] # loop through all the batch files for f in files: with gzip.open(f, "rb") as infile: for line in infile: # parse the JSON for each line yield json.loads(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_documents(self):\n raise NotImplementedError", "def __iter__(self):\n return self.iter_documents()", "def iter_local_docs(docs_path, skip=0, stop=sys.maxsize):\n for i, line in enumerate(open(docs_path)):\n if i < skip:\n continue\n elif i < stop:\n yield json.loads(line)\n else:\n break", "def __iter__(self):\n for this_document in self.documents:\n yield this_document", "def _iter_from_disk(self):\n self.f.seek(0, 0) # relative to start\n for line in self.f:\n yield line\n self.f.seek(0, 2) # relative to end", "def __iter__(self):\n for p in self.paths:\n yield Document.load(os.path.join(self.dirpath, p), fmt=self.fmt)", "def __iter__(self):\n try:\n # Assume it is a file-like object and try treating it as such\n # Things that don't have seek will trigger an exception\n self.source.seek(0)\n for item_no, line in enumerate(self.source):\n yield TaggedDocument(utils.to_unicode(line).split(), [item_no])\n except AttributeError:\n # If it didn't work like a file, use it as a string filename\n with utils.smart_open(self.source) as fin:\n for item_no, line in enumerate(fin):\n yield TaggedDocument(utils.to_unicode(line).split(), [item_no])", "def docs(self, fileids=None, categories=None):\n\n # Create a generator, loading one document into memory at a time.\n for path in self.abspaths(fileids):\n yield Document(path)", "def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]", "def __iter__(self):\n return self.docs.__iter__()", "def __iter__(self):\r\n doc_reader = self.searcher.doc_reader\r\n for docnum in self.scored_list:\r\n yield doc_reader[docnum]", "def __iter__(self):\n if self.documents is None:\n documents = []\n self.corpus = self.corpus.reset_index()\n for index, row in self.corpus.iterrows():\n tokens = self.tokenizer.tokenize(to_unicode(row['body']))\n documents.append(TaggedDocument(self.transformer(tokens), [index, row['stock'], row['doc_tag']]))\n self.documents = documents\n\n return self.documents.__iter__()", "def iter_documents(top_directory):\n for root, dirs, files in os.walk(top_directory):\n for file in filter(lambda file: file.endswith('.txt'), files):\n document = open(os.path.join(root, file)).read() # read the entire document, as one big string\n yield utils.tokenize(document, lower=True) # or whatever tokenization suits you", "def iter_docids(self):\n return iter(self.client.smembers(self.dbprefix + 'docs'))", "def documents(self):\r\n return doc.Documents(self)", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def make_documents(f, index: str) -> typing.Iterator[dict]:\n\n while True:\n line = f.readline()\n if not line:\n break\n idx = int(line.strip())\n line = f.readline()\n doc = {\n '_index': index,\n '_type': \"_doc\",\n '_source': line.strip(),\n '_id': idx,\n }\n yield doc", "def __iter__(self):\n for document in self.query:\n yield self._to_document(document)", "def getDocuments(self):\n return self.objectValues('Multifile')", "def load(self):\n raise NotImplementedError\n yield document", "def __iter__(self):\n for item in self._reader:\n yield item", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def read_documents(*paths):\n for path in paths:\n suffix = path.suffix.lower()\n\n # If compressed, open as gzip stream.\n opener = open\n if suffix == '.gz':\n suffix = path.suffixes[-2].lower()\n opener = gzip.open\n\n if suffix in ('.yaml', '.yml'):\n try:\n for parsed_doc in 
yaml.load_all(opener(str(path), 'r'), Loader=NoDatesSafeLoader):\n yield path, parsed_doc\n except yaml.YAMLError as e:\n raise InvalidDocException('Failed to load %s: %s' % (path, e))\n elif suffix == '.json':\n try:\n yield path, json.load(opener(str(path), 'r'))\n except ValueError as e:\n raise InvalidDocException('Failed to load %s: %s' % (path, e))\n else:\n raise ValueError('Unknown document type for {}; expected one of {!r}.'\n .format(path.name, _ALL_SUPPORTED_EXTENSIONS))", "def read_docs(file_path, tokenizer):\n # working structure used to store each document\n all_docs = []\n doc, end_of_doc = [], False\n\n line_cnt = 0\n tf.logging.info(\"Start processing %s\", file_path)\n for line in tf.io.gfile.GFile(file_path):\n if line_cnt % 100000 == 0:\n tf.logging.info(\"Loading line %d\", line_cnt)\n\n if not line.strip():\n # encounter an empty line (end of a document)\n end_of_doc = True\n cur_sent = []\n else:\n cur_sent = tokenizer.convert_text_to_ids(line.strip())\n\n if cur_sent:\n line_cnt += 1\n doc.append(np.array(cur_sent))\n\n # form a doc\n if end_of_doc or sum(map(len, doc)) >= FLAGS.max_doc_len:\n # only retain docs longer than `min_doc_len`\n doc_len = sum(map(len, doc))\n if doc_len >= max(FLAGS.min_doc_len, 1):\n all_docs.append(doc)\n\n # refresh working structs\n doc, end_of_doc = [], False\n\n # deal with the leafover if any\n if doc:\n # only retain docs longer than `min_doc_len`\n doc_len = sum(map(len, doc))\n if doc_len >= max(FLAGS.min_doc_len, 1):\n all_docs.append(doc)\n\n tf.logging.info(\"Finish %s with %d docs from %d lines.\", file_path,\n len(all_docs), line_cnt)\n\n return all_docs", "def __iter__(self):\n\n return iter(self.files)", "def documents(self) -> list[str]:\n return self._documents", "def read(self):\n self.connect()\n get_books = f\"select * from {self.book_table}\"\n try:\n self.cur.execute(get_books)\n self.con.commit()\n for i in self.cur:\n yield i\n except MySQLError as err:\n messagebox.showinfo(\"Failed to fetch files from database\")\n print(err)", "def get_one_shot_iterator(self):\n\n files = self._get_all_files()\n\n dataset = (\n tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers)\n .map(self._parse_function, num_parallel_calls=self.num_readers)\n .map(self._preprocess_image, num_parallel_calls=self.num_readers))\n\n if self.should_shuffle:\n dataset = dataset.shuffle(buffer_size=100)\n\n if self.should_repeat:\n dataset = dataset.repeat() # Repeat forever for training.\n else:\n dataset = dataset.repeat(1)\n\n dataset = dataset.batch(self.batch_size).prefetch(self.batch_size)\n return dataset.make_one_shot_iterator()", "def _get_docs(self, encoder, path):\n # Check if already loaded\n if path in self._encoder_docs:\n self._logger.debug(\"Loading documents from cache: \" + path)\n return self._encoder_docs[path]\n else:\n self._logger.debug(\"Loading documents from disk: \" + path)\n docs = encoder.load_documents(path)\n self._encoder_docs[path] = docs\n self._logger.debug(\"Added documents to cache: \" + path)\n return docs", "def open_file(self) -> Iterator[NamedIO]:\n with open(self.filename) as f:\n yield cast(NamedIO, f)", "def wackydocs():\n for i, fil in enumerate(WACKYFILES):\n print >> sys.stderr, \"Reading wackypedia file %s %s...\" % (fil, common.str.percent(i+1, len(WACKYFILES)))\n print >> sys.stderr, stats()\n for j, doc in enumerate(wackydocs_in_file(fil)):\n if j % 10000 == 0:\n print >> sys.stderr, \"Reading wackypedia file %s %s, document #%d...\" % (fil, common.str.percent(i+1, 
len(WACKYFILES)), j)\n print >> sys.stderr, stats()\n yield doc", "def getAllFileRecordsIter(fs_name):\n files = None\n session = Queries.createSession()\n try:\n fs_db = session.execute(sqlalchemy.select([FileSpace]).where(FileSpace.storage_name == fs_name)).fetchone()\n catalog = session.execute(sqlalchemy.select([Catalog]).where(Catalog.fs_id == fs_db.id)).fetchone()\n files = session.query(FileTable).filter_by(catalog_id=catalog.id)\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return files", "def get_documents() -> list[Document]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Document)]", "def __iter__(self):\n # pylint: disable=no-member\n with builtins.open(self.path,\n mode=self.mode,\n buffering=self.buffering,\n encoding=self.encoding,\n errors=self.errors,\n newline=self.newline) as file_content:\n for line in file_content:\n yield line", "def list_documents(\n self, index: str, query: Dict[str, Any] = None\n ) -> Iterable[Dict[str, Any]]:\n return es_scan(self.__client__, query=query or {}, index=index)", "def _get_iter(self, url, params):\n for current_page_index in itertools.count():\n result_dict = self._get_page(url, params, current_page_index)\n for document in result_dict['entries']:\n yield document\n if not result_dict['isNextPageAvailable']:\n break", "def getDocuments(self):\n documents = []\n\n for document in self.metaData.jsonObj['documents']:\n d = HyperLinkResource(document)\n documents.append(Document(self._client, d.selfLink))\n\n return documents", "def get_documents(self):\n documents = self.tree.execute(\"$.documents\")\n for doc in documents:\n sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}\n self.document_dict[doc['@id']] = {'sentences': sentences,\n 'location': doc['location']}\n return", "def __iter__(self):\n return iter(self._get_storage())", "def open_files(directory):\n documents = []\n for fl in (os.listdir(directory)):\n if fl.endswith('.txt'):\n fl_path = os.path.join(directory, fl)\n with open(fl_path, 'r') as f:\n full_text = f.read()\n documents.append(full_text)\n return documents", "def all_documents(self):\n return [item[0] for item in\n self.sql_session.query(Document).values(Document.id)]", "def read_iter_from_file(path_to_file_read):\n with open(path_to_file_read, \"r\") as fichero:\n line = fichero.readline().strip()\n while line:\n yield line\n line = fichero.readline().strip()", "def __iter__(self):\r\n try:\r\n dup_fp = self._fp.dup()\r\n except self._fp.Error:\r\n log.error('Failed to dup %r' % self._fp)\r\n return\r\n\r\n try:\r\n while True:\r\n blob = RecordIO.Reader.do_read(dup_fp, self._codec)\r\n if blob:\r\n yield blob\r\n else:\r\n break\r\n finally:\r\n dup_fp.close()", "def collect_documents(self):\n documents = []\n ignored = []\n for path in self.paths:\n try:\n current_document = MAE_Document(path)\n except UnsupportedMIMETypeError as e:\n ignored.append(str(e))\n else:\n documents.append(current_document)\n if ignored:\n print \"Some files were ignored:\"\n for file in ignored:\n print \"\\t%s\" % file\n return documents", "def iter(self):\n for _file in self._files:\n with asHandle(_file) as fp:\n # Use FastqGeneralIterator because it provides access to\n # the unconverted quality string (i.e., it doesn't try to\n # figure out the numeric quality values, which we 
don't\n # care about at this point).\n for sequenceId, sequence, quality in FastqGeneralIterator(fp):\n yield self.readClass(sequenceId, sequence, quality)", "def __iter__(self):\n for f in self.path.glob('**/*'):\n if f.is_file() and not os.stat(str(f.resolve())).st_size == 0:\n yield Resource(str(f.resolve()), DiskCrawler.compute_digest)", "def get_docs_sources(self):\n docs = [doc for doc,_,_ in self.doc_to_get]\n \n retry_until_ok(self.docman.elastic.indices.refresh, index=\"\")\n documents = self.docman.elastic.mget(body={'docs': docs})\n return documents", "def __iter__(self):\n for tokens in readbook(self.path, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)", "def fileStreamer(self, filePath):\n\t\tfor line in open(filePath):\n\t\t\tyield line", "def _read(self, file_path: str) -> Iterator[Instance]:\n with open(cached_path(file_path), \"r\", encoding=\"utf-8\") as data_file:\n for lines in read_conll_file(data_file):\n fields = [line.rstrip().split(self._delimiter) for line in lines]\n # unzipping trick returns tuples, but our Fields need lists\n fields = [list(field) for field in zip(*fields)]\n tokens, tags = fields\n yield self.text_to_instance(tokens, tags)", "def get_files(self, block):\n \n raise NotImplementedError('get_files')", "def get_input_contents(self):\n try:\n ret_files = []\n coll = self.collections[self._primary_input_collection]\n ret_file = {'coll_id': coll['coll_id'],\n 'scope': coll['scope'],\n 'name': coll['name'],\n 'bytes': coll.coll_metadata['bytes'],\n 'adler32': None,\n 'min_id': 0,\n 'max_id': coll.coll_metadata['total_files'],\n 'content_type': ContentType.File,\n 'content_metadata': {'total_files': coll['coll_metadata']['total_files']}\n }\n ret_files.append(ret_file)\n return ret_files\n except Exception as ex:\n self.logger.error(ex)\n self.logger.error(traceback.format_exc())\n raise exceptions.IDDSException('%s: %s' % (str(ex), traceback.format_exc()))", "def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]", "def list_documents(self, engine_name, current=1, size=20):\n data = { 'page': { 'current': current, 'size': size } }\n return self.swiftype_session.request('get', \"engines/{}/documents/list\".format(engine_name), json=data)", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_files(self):\n return self.ebook_file.get_files()", "def __iter__(self) -> Iterator:\n return iter(self.get_data_loader())", "def documents(self):\n from kitsune.wiki.models import Document\n\n return Document.objects.filter(documentimage__image=self)", "def es_iterate_all_documents(es, index, pagesize=250, scroll_timeout=\"3m\", **kwargs):\n is_first = True\n while True:\n # Scroll next\n if is_first: # Initialize scroll\n result = es.search(index=index, scroll=\"1m\", **kwargs, body={\n \"size\": pagesize\n })\n is_first = False\n else:\n result = es.scroll(body={\n \"scroll_id\": scroll_id,\n \"scroll\": scroll_timeout\n })\n scroll_id = result[\"_scroll_id\"]\n hits = result[\"hits\"][\"hits\"]\n # Stop after no more docs\n if not hits:\n break\n # Yield each entry\n yield from (hit['_source'] for hit in hits)", "def get_readers():\n return all_readers", "def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n 
Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")", "def GetDocumentListFeed(self):\n q = gdata.docs.service.DocumentQuery();\n return self.QueryDocumentListFeed(q.ToUri())", "def _all_docs_by_page(db_url, page_size=10):\n # Tell CouchDB we only want a page worth of documents at a time, and that\n # we want the document content as well as the metadata\n view_arguments = {'limit': page_size, 'include_docs': \"true\"}\n\n # Keep track of the last key we've seen\n last_key = None\n\n while True:\n response = requests.get(db_url + \"/_all_docs\", params=view_arguments)\n\n # If there's been an error, stop looping\n if response.status_code != 200:\n _print_error(\"Error from DB: \" + str(response.content))\n break\n\n # Parse the results as JSON. If there's an error, stop looping\n try:\n results = json.loads(response.content)\n except:\n _print_error(\"Unable to parse JSON: \" + str(response.content))\n break\n\n # If there's no more data to read, stop looping\n if 'rows' not in results or not results['rows']:\n break\n\n # Otherwise, keep yielding results\n for r in results['rows']:\n last_key = r['key']\n yield r\n\n # Update the view arguments with the last key we've seen, so that we\n # can step forward properly by page. (Of course, we actually need a key\n # that is just _after_ the last one we've seen, so tack on a high\n # Unicode character).\n # Note that CouchDB requires keys to be encoded as JSON\n last_key = last_key + u'\\xff'\n view_arguments.update(startkey=json.dumps(last_key))", "def get_iter(self, numPerIter=None):\n if numPerIter == None:\n numPerIter = self.chunk_size\n while True:\n els = self.read(numPerIter)[:]\n if els.shape[0] == 0:\n break\n yield els", "def documents():\n for domain in os.listdir(DOCUMENT_FOLDER):\n for docname in os.listdir(os.path.join(DOCUMENT_FOLDER, domain)):\n filename = os.path.join(DOCUMENT_FOLDER, domain, docname)\n if filename.endswith(\".html\"):\n fullDocname = os.path.join(domain, docname)\n yield (fullDocname, filename)", "def tfrecord_iterator(\n data_path: str,\n index_path: typing.Optional[str] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[memoryview]:\n if compression_type == \"gzip\":\n file = gzip.open(data_path, \"rb\")\n elif compression_type is None:\n file = io.open(data_path, \"rb\")\n else:\n raise ValueError(\"compression_type should be either 'gzip' or None\")\n length_bytes = bytearray(8)\n crc_bytes = bytearray(4)\n datum_bytes = bytearray(1024 * 1024)\n\n def read_records(start_offset=None, end_offset=None):\n nonlocal length_bytes, crc_bytes, datum_bytes\n\n if start_offset is not None:\n file.seek(start_offset)\n if end_offset is None:\n end_offset = os.path.getsize(data_path)\n while file.tell() < end_offset:\n if file.readinto(length_bytes) != 8:\n raise RuntimeError(\"Failed to read the record size.\")\n if file.readinto(crc_bytes) != 4:\n raise RuntimeError(\"Failed to read the start token.\")\n length, = struct.unpack(\"<Q\", length_bytes)\n if length > len(datum_bytes):\n datum_bytes = datum_bytes.zfill(int(length * 1.5))\n datum_bytes_view = memoryview(datum_bytes)[:length]\n if file.readinto(datum_bytes_view) != length:\n raise RuntimeError(\"Failed to read the record.\")\n if file.readinto(crc_bytes) != 4:\n raise RuntimeError(\"Failed to read the end token.\")\n 
yield datum_bytes_view\n\n if index_path is None:\n yield from read_records()\n else:\n index = np.loadtxt(index_path, dtype=np.int64)[:, 0]\n if shard is None:\n offset = np.random.choice(index)\n yield from read_records(offset)\n yield from read_records(0, offset)\n else:\n num_records = len(index)\n shard_idx, shard_count = shard\n start_index = (num_records * shard_idx) // shard_count\n end_index = (num_records * (shard_idx + 1)) // shard_count\n start_byte = index[start_index]\n end_byte = index[end_index] if end_index < num_records else None\n yield from read_records(start_byte, end_byte)\n\n file.close()", "def _collect(self, text_directory) -> Iterator[Any]:\n return dataset_path_iterator(text_directory, self.configs.file_ext)", "def read_document(self):\n words = self.word_runner()\n word = \"press space to start\"\n orp_ind = 13\n try:\n while True:\n time.sleep(60 / self.wpm)\n\n if self.is_reading:\n word = next(words)\n orp_ind = int(self.orp_index(word))\n\n yield (word, orp_ind)\n except StopIteration:\n pass\n finally:\n del words", "def read_file(filename):\n\n all_documents = []\n document = []\n with tf.gfile.GFile(filename, \"r\") as reader:\n for line in reader:\n line = line.strip()\n if not line:\n continue\n if line.lower()[:7] == \"chapter\":\n if document:\n all_documents.append(document)\n document = []\n else:\n document.append(line)\n if document:\n all_documents.append(document)\n\n return all_documents", "def __iter__(self):\n for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)", "def iterate_corpus(corpus_file):\n logging.info(\"Opening corpus file...\")\n with zipfile.ZipFile(corpus_file) as corpus:\n for name in corpus.namelist():\n logging.info(\"Reading contents of file '{}'.\".format(name))\n with corpus.open(name) as f:\n try:\n tree = etree.parse(f)\n root = tree.getroot()\n yield root\n except etree.ParseError:\n message = \"File '{}' could not be read due to ParseError.\"\n logging.warning(message.format(name))\n continue", "def get_documents(path_to_dir, qn_id):\n files_to_question = []\n directory = os.path.join(path_to_dir, str(qn_id))\n filenames = os.listdir(directory)\n filenames.sort(key=int)\n\n for filename in filenames:\n doc_name, _ = os.path.splitext(filename)\n document_filepath = os.path.join(directory, filename)\n with open(document_filepath, 'rb') as subfile:\n subfile_data = subfile.readlines()\n\n # Use tried-and-tested tokenizer code from P1...\n tokenized_sentences = string_to_tokens(subfile_data)\n if not tokenized_sentences:\n continue\n\n doc = Document(doc_id=int(doc_name), qn_id=qn_id, content=tokenized_sentences)\n files_to_question.append(doc)\n\n return files_to_question", "def read(self):\n return self.client.get(\n index=self.index,\n id=self.document_id,\n ignore=[404],\n )['_source']", "def __iter__(self):\n\n # Open the data reader\n self.data.open()\n\n starts = np.arange(self.start, self.stop, self.chunksize)\n for a, b in zip_longest(starts, starts[1:], fillvalue=self.stop):\n yield self.data.read(a, b, **self.kwargs)", "def retrieve_all_documents(\n self,\n collection_name: str,\n sort: List = [],\n asc: bool = True,\n include_vector: bool = True,\n include_fields: List = [],\n retrieve_chunk_size: int=1000,\n **kwargs\n ):\n num_of_docs = self.collection_stats(collection_name)['number_of_documents']\n with self.progress_bar(list(range(int(num_of_docs/ 
retrieve_chunk_size)))) as pbar:\n d = self.retrieve_documents(\n collection_name=collection_name, page_size=retrieve_chunk_size, sort=sort, asc=asc, include_vector=include_vector,\n include_fields=include_fields, **kwargs\n )\n all_docs = d[\"documents\"]\n pbar.update(1)\n while len(d[\"documents\"]) > 0:\n d = self.retrieve_documents(\n collection_name=collection_name,\n page_size=retrieve_chunk_size,\n cursor=d[\"cursor\"],\n sort=sort,\n asc=asc,\n include_vector=include_vector,\n include_fields=include_fields\n )\n all_docs += d[\"documents\"]\n pbar.update(1)\n return all_docs", "async def stream_result_files(self) -> AsyncGenerator[StreamInfoUrl, None]:\n for dataset in self.datasets:\n async for file in dataset.get_data_rootfiles_stream(self.query.value()):\n yield file", "def iter_es_docs(es_host, es_index, es_type, query={\"_source\": False}):\n es = Elasticsearch(es_host)\n scroll = helpers.scan(es, query=query, index=es_index, doc_type=es_type)\n try:\n for x in scroll:\n yield x\n except elasticsearch.exceptions.NotFoundError as ex:\n print(ex, file=sys.stderr)\n finally:\n del es # No close() method. Guess this works.", "def training_documents(self):\n return self._training_documents", "def get_meta_of_files(session=konfuzio_session()) -> List[dict]:\n url = get_documents_meta_url()\n result = []\n\n while True:\n r = retry_get(session, url)\n data = r.json()\n if isinstance(data, dict) and 'results' in data.keys():\n result += data['results']\n if 'next' in data.keys() and data['next']:\n url = data['next']\n else:\n break\n else:\n result = data\n break\n\n sorted_documents = sorted(result, key=itemgetter('id'))\n return sorted_documents", "def read_all_files():\n paths = get_all_recording_paths()\n\n return read_by_paths(paths)", "def get_all(self) -> Generator:\n\n for filename in self.list_files():\n yield self.get(filename)", "def getMyDocuments( self, REQUEST=None ):\n membership = getToolByName( self, 'portal_membership', None )\n if membership is None:\n return\n\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n\n total_objects, documents = self.searchResults( type='documents', with_limit=1, REQUEST=REQUEST, \\\n creator=uname, implements='isHTMLDocument', sort_on='created', sort_order='reverse', \\\n sort_limit=50 )\n\n return ( total_objects, documents, )", "def gist_documents(self, username, max_docs=None):\n r = self.requests_session.get(self.gist_path.format(username=username))\n if r.status_code != 200:\n self.log(f\"Couldn't get gists for {username}\", \"ERROR\")\n return\n\n docs_fetched = 0\n for d in r.json():\n\n docs_fetched += 1\n yield d\n \n if docs_fetched >= self.items_per_page:\n # this will only print once \n # TODO pagination\n msg = (f\"TODO pagination not enabled so gists by user:{username} might have be \"\n f\"skipped as they have written more than {self.items_per_page} gists.\"\n )\n self.log(msg, \"WARNING\")\n\n if max_docs is not None and docs_fetched > max_docs:\n return", "def getAccessibleDocuments( self, REQUEST=None ):\n membership = getToolByName( self, 'portal_membership', None )\n if membership is None:\n return\n\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n IsManager = user.IsManager()\n IsAdmin = user.IsAdmin()\n\n membership.updateLoginTime( uname )\n\n total_objects, documents = self.searchResults( type='documents', with_limit=1, REQUEST=REQUEST, \\\n implements='isHTMLDocument', sort_on='created', sort_order='reverse', \\\n sort_limit=50 )\n\n if not IsManager and 
documents:\n res = []\n system_objects = CustomSystemObjects()\n for x in documents:\n try: path = x.getPath()\n except: continue\n IsSystem = 0\n for key in system_objects:\n if path.find( key ) > -1:\n IsSystem = 1\n break\n if not IsSystem:\n res.append( x )\n return ( total_objects, res, )\n\n return ( total_objects, documents, )", "def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):\n i = 0\n p = Progress()\n for path in paths:\n with open(path, 'r') as f:\n for line in f:\n i += 1\n p.print_progress(i/n)\n\n # We do minimal pre-processing here so the model can learn\n # punctuation\n line = line.lower()\n\n if sentences:\n for sent in sent_tokenize(line):\n tokens = tokenizer(sent)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])\n else:\n tokens = tokenizer(line)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])", "def getFiles(self, getContent=False):\n for index, file in enumerate(self.files):\n if getContent:\n logger.debug(\n \"get file {} from service {}\".format(\n file, self.servicename)\n )\n content = self.getFile(index)\n\n yield file, content\n else:\n yield file", "def documents(self):\r\n return GlobalDocuments(self)", "def ReadFilesGenerator(self):\n\n for file in self._file_names:\n file_list = []\n\n # TODO see further into yielding one line at a time\n with open(file, 'r', encoding='mbcs') as sped:\n file_list = sped.read().splitlines()\n\n if not self.isSigned(file_list):\n file_list = self.stripSignature(file_list)\n\n yield file, file_list", "def reading(self):\n if (\n self.reading_handle is None\n and self.writing_handle is None\n and self.filename is not None\n ):\n with self._open() as fp:\n self.reading_handle = fp\n try:\n yield\n finally:\n self.reading_handle = None\n else:\n yield", "def items(self):\n return self.docs.items()", "def __iter__(self):\r\n example = []\r\n for line in open(self.fullpath):\r\n if line != '\\n':\r\n example.append(line.rstrip()) # remove newline\r\n else:\r\n yield example\r\n example = []", "def _file_iter(f, size):\n chunk = f.read(size)\n while chunk:\n yield chunk\n chunk = f.read(size)", "def __iter__(self):\n\n if self.is_view:\n return iter(self._view)\n\n return iter(self._storage)", "def get_mail_docs_in_bucket():\n if BUCKET_ID not in settings.DOCUMENT_BUCKETS:\n raise ImproperlyConfigured(f'Bucket \"{BUCKET_ID}\" is missing in settings')\n\n config = settings.DOCUMENT_BUCKETS[BUCKET_ID]\n if 'bucket' not in config:\n raise ImproperlyConfigured(f'Bucket \"{BUCKET_ID}\" not configured properly in settings')\n\n name = config['bucket']\n if not name:\n raise ImproperlyConfigured(\n f'Bucket \"{BUCKET_ID}\" bucket value not configured properly in settings',\n )\n\n client = documents.get_s3_client_for_bucket(bucket_id=BUCKET_ID)\n\n paginator = client.get_paginator('list_objects')\n for page in paginator.paginate(Bucket=name):\n for doc in page.get('Contents') or []:\n key = doc['Key']\n with tempfile.TemporaryFile(mode='w+b') as f:\n client.download_fileobj(Bucket=name, Key=key, Fileobj=f)\n f.seek(0)\n content = f.read()\n yield {'source': key, 'content': content}", "def readDocuments(docs, prefix):\n\n fmap = open(\"mapping.txt\", \"w\")\n\n\n i = -1\n for folder in pressrelease_folders_txt:\n i += 1\n fullpath = path.join(prefix, folder)\n totFilesInFolder = len(fnmatch.filter(os.listdir(fullpath),\n '*.txt'))\n countFiles = 0\n for f in listdir(path.join(prefix, folder)):\n fmap.write(\"{0}\\t {1:5d}\\n\".format(f, countFiles))\n countFiles += 1\n fullname = fullpath + f\n # text 
= open(fullname).readlines()\n ff = open(fullname)\n docs.append(ff.read())\n\n print(\"{0:5d}/{1:5d} :: Reading file {2:10s} \".format(countFiles,\n totFilesInFolder, f))\n\n # if countFiles > 4:\n # return\n\n\n fmap.close()", "def file_reader(self, filename):\n\n for chunk in pd.read_csv(filename, chunksize = Reader.chunksize,delimiter='_'): \n yield chunk", "def _collect(self, conll_directory) -> Iterator[Any]:\n logging.info(\"Reading .conll from %s\", conll_directory)\n return dataset_path_iterator(conll_directory, self.configs.file_ext)", "def __iter__(self):\n for hit in self._evaluate()['hits']['hits']:\n yield self._to_document(hit)" ]
[ "0.7139281", "0.66851294", "0.6661767", "0.6630697", "0.661265", "0.65598124", "0.655498", "0.6535111", "0.64075583", "0.6239595", "0.6149933", "0.61477643", "0.61379325", "0.6104141", "0.6089389", "0.6088322", "0.60240936", "0.60088474", "0.5915154", "0.5884814", "0.58526784", "0.58074826", "0.5763371", "0.5722361", "0.5705438", "0.57021177", "0.5701374", "0.5696764", "0.56815165", "0.5675811", "0.56638473", "0.5608316", "0.5603283", "0.55998135", "0.5590417", "0.555517", "0.5551519", "0.5525575", "0.55213803", "0.5513558", "0.5511215", "0.5495051", "0.54944265", "0.54835635", "0.54715383", "0.5464893", "0.5460666", "0.5453407", "0.5449952", "0.54378283", "0.54308593", "0.5427517", "0.5424707", "0.5408128", "0.54053515", "0.54053515", "0.54053515", "0.5405061", "0.5401034", "0.53958875", "0.5391533", "0.53874", "0.5364425", "0.5361068", "0.53586024", "0.5352649", "0.5349233", "0.5345056", "0.533967", "0.5338266", "0.53366274", "0.5327817", "0.53258455", "0.5299552", "0.52984565", "0.5296659", "0.5295228", "0.5291053", "0.52888983", "0.5273602", "0.52714235", "0.52714115", "0.52639955", "0.5260967", "0.52578294", "0.5244598", "0.5240442", "0.5227783", "0.5208286", "0.5204492", "0.51950216", "0.5193612", "0.51891905", "0.5188757", "0.5188348", "0.51883054", "0.518824", "0.5185419", "0.51690936", "0.5166509" ]
0.61733896
10
Returns an iterator of paragraphs being read from disk.
def get_paragraphs(self, batch=None): # loop through the document stream for this document database for document in self.get_documents(batch): for paragraph in document["paragraphs"]: # yield the paragraphs one by one yield paragraph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readFile(self):\n with pdfplumber.open(self.path) as pdf:\n first_page = pdf.pages[0]\n text = first_page.extract_text()\n text = text.split('\\n')\n return processText(text)", "def generate_paragraphs(self, count=3):\n\n with self.open_text_data() as f:\n result = self.read_paragraphs(f, count=count)\n return result", "def _paragraphs_raw(self):\n for par in self.parsed.find_all(\"p\")[self.PAR_START:]:\n yield par", "def read(self, paragraph_idx=None):\n if paragraph_idx:\n self.paragraphs[paragraph_idx].read()\n else:\n for paragraph in self.paragraphs:\n paragraph.read()", "def get_parsed_paragraphs_from_file(self, processed_path):\n with open(processed_path, \"r\") as f:\n sent_len = json.loads(f.readline())['sentence_lens']\n paragraphs = list()\n line_no = 1\n para_idx = 0\n while para_idx < len(sent_len):\n paragraph = list()\n end_no = sent_len[para_idx]\n while line_no < end_no:\n sent = json.loads(f.readline())\n sent[\"sid\"] = self.generate_sid(sent, processed_path, line_no)\n paragraph.append(sent)\n line_no += 1\n para_idx += 1\n paragraphs.append(paragraph)\n return paragraphs", "def load_retrieved_paragraphs(retrieved_dp, cid):\n\n if not exists(retrieved_dp):\n raise ValueError('retrieved_dp does not exist: {}'.format(retrieved_dp))\n\n fp = join(retrieved_dp, cid)\n with io.open(fp, encoding='utf-8') as f:\n content = f.readlines()\n\n original_paras = [ll.rstrip('\\n').split('\\t')[-1] for ll in content]\n\n para_tuples = [dataset_parser._proc_para(pp, rm_dialog=False, rm_stop=True, stem=True, to_str=True)\n for pp in original_paras]\n\n original_paras, processed_paras = list(zip(*para_tuples)) # using the new \"original\" to keep consistency\n\n return original_paras, processed_paras # for compatibility of document organization for similarity calculation", "def paragraphs(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}')+1)]\n number_of_paragraphs = len(list(root.iter(root_tag + 'p')))\n return number_of_paragraphs", "def generate_paragraphs(self):\n def dig(hr_tag, end_index):\n paragraphs = []\n for tag in hr_tag.children:\n if tag.name == 'hr':\n return paragraphs + dig(tag, end_index)\n text = (str(tag)\n if isinstance(tag, NavigableString)\n else tag.get_text())\n if '$' in text and not tag.find('table'):\n start_index = document_txt.index(text[:search_chars])\n end_index = start_index + len(text)\n paragraphs.append({\n 'text': text,\n 'start': start_index,\n 'end': end_index\n })\n return paragraphs\n\n with open('document.txt', 'rb') as f1:\n document_txt = f1.read().decode()\n search_chars = 20\n paragraphs = dig(self.soup.find('body'), 0)\n paragraphs = sorted(paragraphs, key=lambda x: x['start'])\n with open('paragraphs.txt', 'wb') as f2:\n f2.write(json.dumps(paragraphs, indent=2, sort_keys=True).encode())", "def paragraphs(self, data=True):\n return self.nodes(self.max_depth, data)", "def _get_all_paragraphs(self) -> List[Paragraph]:\n documents = self.document_store.get_all_documents()\n\n paragraphs = []\n p_id = 0\n for doc in documents:\n for p in doc.text.split(\"\\n\\n\"): # TODO: this assumes paragraphs are separated by \"\\n\\n\". 
Can be switched to paragraph tokenizer.\n if not p.strip(): # skip empty paragraphs\n continue\n paragraphs.append(\n Paragraph(document_id=doc.id, paragraph_id=p_id, text=(p,), meta=doc.meta)\n )\n p_id += 1\n logger.info(f\"Found {len(paragraphs)} candidate paragraphs from {len(documents)} docs in DB\")\n return paragraphs", "def paragraph(self, text):\n return [text]", "def get_lines_from_source(self):\n extension = self.get_doc_file_extension()\n if extension in ('txt', ''):\n return tuple(line.decode('utf-8') for line in self.doc_file.readlines())\n elif extension == 'docx':\n docx_document = Docx(BytesIO(self.doc_file.read()))\n return tuple(paragrah.text for paragrah in docx_document.paragraphs)\n elif extension == 'pdf':\n raise NotImplementedError()\n else:\n raise ValueError(\"file_format not supported\")", "def get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs", "def get_paragraph(self):\r\n \r\n size = self.paragraph_sizes.get()\r\n size += int(random.randrange(int(size * 0.8), int(size * 1.2)))\r\n \r\n lines = []\r\n paragraph_length = 0\r\n while paragraph_length < size:\r\n sentence, length = self.get_sentence()\r\n lines.append(sentence)\r\n paragraph_length += length\r\n\r\n paragraph = \"\\t\" + \" \".join(lines) + \"\\n\\n\"\r\n return paragraph, paragraph_length", "def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)", "def paragraphs(iterable, splitter):\n assert isinstance(splitter, (tuple, list))\n splitter = tuple(splitter)\n paragraph = []\n for line in iterable:\n if line.startswith(splitter):\n if paragraph:\n yield paragraph\n paragraph = [line]\n else:\n paragraph.append(line)\n if paragraph:\n yield paragraph", "def get_paragraphs(text):\n return [LINE_START.sub(' ', p) for p in PARAGRAPH_SEP.split(text)]", "def get_number_of_paragraph(self):\n file_to_read = f'{self.path}/{self.filename}'\n file = open(file_to_read, 'r', encoding='utf-8')\n string_to_match = '<p>'\n count = 0\n for line in file:\n if string_to_match in line:\n count += 1\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_paragraph', count)\n print(datetime.now(), '-', 'number_of_paragraph for', self.filename, 'calculated =', count)\n return None", "def readlines(self):\n return list(self.iterlines())", "def paragraphs_to_lines(doc: List[List[str]]) -> List[str]:\n lines = []\n for pg in doc:\n lines.extend(pg)\n lines.append(\"\")\n\n return lines", "def __read_lines__(self):\r\n fd = open(self.input_file, \"r\")\r\n lines = fd.readlines()\r\n fd.close()\r\n return lines", "def linked_text_paragraphs(self):\n for par in self._main_paragraphs_raw():\n par_links = par.find_all('a')\n if len(par_links) == 0:\n self.main_count += len(par.text)\n yield par.text\n else:\n for el in par.contents:\n if el.name is None:\n #this is plain text\n self.main_count += len(str(el))\n yield str(el)\n elif el.name == \"a\" and \"href\" in el.attrs:\n id = el[\"href\"].lstrip('#')\n try:\n foot_par = self._get_footnote_par(id)\n except NoFootnoteError:\n self.log(f\"Could not find footnote for {id}, skipping.\")\n self.footnote_count += len(foot_par.text)\n yield foot_par.text", "def readlines(self):\n return [line for line in self]", "def readlines(self) -> 
list[bytes] | None:", "def readlines(self):\n return [\"\"] + self.get(\"1.0\", END).split(\"\\n\")[:-1]", "def extract_text(path, pages):\n out = []\n with open(path, 'rb') as file:\n pdftotext_string = pdftotext.PDF(file)\n\n for i in pages:\n out.append(pdftotext_string[i - 1])\n\n return out", "def _get_file_lines(self):\n\n # check if the PO/POT file is readable\n if self.__name is None or not os.access(self.__name, os.R_OK):\n print_error_message(\"PO file does not exist or is not readable\")\n sys.exit( )\n\n # read the PO file\n pofile = file(self.__name, 'r')\n lines = pofile.readlines( )\n pofile.close( )\n\n return lines", "def paragraphs(str):\n return [mark_safe(x) for x in para_list(str)]", "def getlines(self, n, m):\n return self.__contents[n:m]", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations", "def current_document_text(self):\n return self.current_document.lines", "def find_bound_paragraph(c: Cmdr) -> tuple[str, list[str], str]:\n head, ins, tail = c.frame.body.getInsertLines()\n head_lines = g.splitLines(head)\n tail_lines = g.splitLines(tail)\n result = []\n insert_lines 
= g.splitLines(ins)\n para_lines = insert_lines + tail_lines\n # If the present line doesn't start a paragraph,\n # scan backward, adding trailing lines of head to ins.\n if insert_lines and not startsParagraph(insert_lines[0]):\n n = 0 # number of moved lines.\n for s in reversed(head_lines):\n if ends_paragraph(s) or single_line_paragraph(s):\n break\n elif startsParagraph(s):\n n += 1\n break\n else:\n n += 1\n if n > 0:\n para_lines = head_lines[-n :] + para_lines\n head_lines = head_lines[: -n]\n ended, started = False, False\n for i, s in enumerate(para_lines):\n if started:\n if ends_paragraph(s) or startsParagraph(s):\n ended = True\n break\n else:\n result.append(s)\n elif s.strip():\n result.append(s)\n started = True\n if ends_paragraph(s) or single_line_paragraph(s):\n i += 1\n ended = True\n break\n else:\n head_lines.append(s)\n if started:\n head = ''.join(head_lines)\n tail_lines = para_lines[i:] if ended else []\n tail = ''.join(tail_lines)\n return head, result, tail # string, list, string\n return None, None, None", "def read(self):\n path = os.path.expanduser(self.path)\n with open(path, encoding=\"utf-8\") as f:\n return f.read().splitlines()", "def read(self):\n dataset = Dataset()\n\n file_list = glob.glob(str(self.directory + \"/*.txt\"))\n\n for file_path in file_list:\n file_name = os.path.basename(file_path)\n\n docid, partid_prefix, = file_name.replace('.txt', '').split('-', 1)\n # partid_prefix not complete due to multiple part cration for a single .txt file\n\n if 'Abstract' in partid_prefix:\n is_abstract = True\n else:\n is_abstract = False\n\n with open(file_path, encoding='utf-8') as file:\n text_raw = file.read()\n\n text = text_raw.replace('** IGNORE LINE **\\n', '')\n paragraph_list = text.split('\\n\\n')\n\n # inital offset for raw_text\n tot_offset = text_raw.count('** IGNORE LINE **\\n') * 18\n offsets = [tot_offset]\n\n for i, text_part in enumerate(paragraph_list):\n # if text is empty (usually last text due to splitting of \"\\n\\n\")\n if text_part != \"\":\n partid = \"{}-p{}\".format(partid_prefix, i + 1)\n\n if docid in dataset:\n dataset.documents[docid].parts[partid] = Part(text_part, is_abstract=is_abstract)\n else:\n document = Document()\n document.parts[partid] = Part(text_part, is_abstract=is_abstract)\n dataset.documents[docid] = document\n\n # add offset for next paragraph\n tot_offset += len(text_part) + 2\n offsets.append(tot_offset)\n\n # to delete last element\n del offsets[-1]\n\n # annotations\n with open(file_path.replace('.txt', '.ann'), encoding='utf-8') as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n if row[0].startswith('T'):\n entity_type, start, end = row[1].split()\n start = int(start)\n end = int(end)\n text = row[2]\n\n partid = None\n part_index = None\n\n for i in range(len(offsets) - 1):\n if offsets[i+1] > start:\n part_index = i\n break\n\n if part_index is None:\n part_index = len(offsets) - 1\n\n partid = \"{}-p{}\".format(partid_prefix, part_index + 1)\n real_start = start - offsets[part_index]\n real_end = end - offsets[part_index]\n calc_ann_text = document.parts[partid].text[real_start : real_end]\n\n if calc_ann_text != text:\n print(\" ERROR\", docid, part_index, partid, start, offsets, real_start, \"\\n\\t\", text, \"\\n\\t\", calc_ann_text, \"\\n\\t\", document.parts[partid].text)\n\n if entity_type == 'mutation':\n ann = Entity(self.mut_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n elif entity_type == 'gene':\n ann = 
Entity(self.gene_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n return dataset", "def paragraph(self, on, **kw):\n if self._terse:\n return ''\n FormatterBase.paragraph(self, on)\n tag = 'p'\n if on:\n tagstr = self._open(tag, **kw)\n else:\n tagstr = self._close(tag)\n return tagstr", "def get_paragraph(tag: Optional[Tag]) -> str:\n if tag is None:\n return \"\"\n\n paragraph = \"\\n\".join(p.text.strip() for p in tag.find_all(\"para\"))\n paragraph += \"\\n\"\n return paragraph", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def last10paragraphs(self):\n return Paragraph.objects.filter(story=self)[:10]", "def extract_lines(parent: str, begin: Predicate, end: Predicate) -> str:\n lines = parent.splitlines()\n\n begin_line = find_line(lines, begin)\n end_line = find_line(lines, end, begin_line+1)\n\n new_lines = lines[begin_line+1:end_line]\n\n return \"\\n\".join(new_lines)", "def get_lines():\n buf = vim.current.buffer\n return buf", "def read_in_files():\n\n num_files = len([name for name in os.listdir(DATA_SOURCE) if name.endswith(\".txt\")])\n loading_section_size = num_files / 30\n count = 0\n\n sentences_as_lists = []\n for filename in os.listdir(DATA_SOURCE):\n if filename.endswith(\".txt\"):\n\n # Pretty loading bar\n print(\"Processing Files: [\", end=\"\")\n for i in range(31, -1, -1):\n if count > i * loading_section_size:\n for j in range(0, i):\n print(\"-\", end=\"\")\n sys.stdout.flush()\n for j in range(i, 30):\n print(\" \", end=\"\")\n sys.stdout.flush()\n break;\n if count == num_files:\n print(\"] \", count, end=\"\\n\")\n else:\n print(\"] \", count, end=\"\\r\")\n sys.stdout.flush()\n\n # Open the paper\n paper_to_open = DATA_SOURCE + filename\n paper = Reader().open_file_single_string(paper_to_open)\n udata = paper.decode(\"utf-8\")\n paper = udata.encode(\"ascii\", \"ignore\")\n\n # Split the data into a list of sentences, where each sentence is a list of words\n sentences = sent_tokenize(paper)\n\n for sentence in sentences:\n words = word_tokenize(sentence)\n sentences_as_lists.append(words)\n\n if DEBUG:\n print(sentences_as_lists)\n wait()\n\n count += 1\n\n return sentences_as_lists", "def get_text_lines(instText):\n\n # Find out which part this is\n part = instText.part\n # Get the necessary parameters: lng, ext, dir\n sLng = part.corpus.get_lng_display()\n sDir = part.dir\n sName = instText.fileName\n sFormat = instText.get_format_display()\n # Now try to get the information\n oBack = get_crpp_text(sLng, sDir, sFormat, sName)\n # Prepare what we return\n if oBack == None or oBack['status'] == 'error':\n return None\n else:\n return oBack", "def paragraph(lines) -> List[Tuple[str, Any]]:\n p = Paragraph.parse_lines(lines)\n acc = []\n for c in p.children:\n if type(c).__name__ == \"Directive\":\n if c.role == \"math\":\n acc.append(Math(c.value))\n else:\n acc.append(c)\n else:\n acc.append(c)\n p.children = acc\n return p", "def get_poem(self, p):\n poem = []\n number = self.int2roman[p] + \".\"\n next_number = self.int2roman[p + 1] + \".\"\n print(self.index[number])\n index1 = self.index[number][0]\n index2 = self.index[next_number][0]\n for i in range(index1 + 1, index2):\n if self.msgs[i] != \" \" and self.get_msg(i) != \"\":\n poem.append(self.get_msg(i))\n return poem", "def blockify(source):\n\n paragraphs = [\"\"]\n for line in source.strip().split(\"\\n\"):\n line = line.strip()\n if line: paragraphs[-1] 
+= line + \" \"\n elif paragraphs[-1]: paragraphs.append(\"\")\n\n return paragraphs", "def simple_text_reader(text_file):\n with open(text_file, 'rt') as file:\n data = file.read()\n return data", "def get_plain_sentences(self, type):\n if type == \"translation\":\n fn = self.translationfile()\n elif type == \"source\":\n fn = self.sourcefile()\n elif type == \"reference\":\n fn = self.referencefile()\n else:\n raise ValueError\n with open(fn, \"r\") as f:\n lines = f.readlines()\n return lines", "def getDataParagraph(startpattern,stoppattern,datararray):\n output = []\n inparagraph = 'FALSE'\n lines=datararray\n for i in range(len(lines)):\n search_start=re.search(r'{0}'.format(startpattern),lines[i])\n if search_start is not None or inparagraph == 'TRUE':\n inparagraph = 'TRUE'\n lines[i] = lines[i].split('\\n')[0]\n if lines[i].startswith('*'):\n pass\n else:\n output.append(lines[i])\n search_stop=re.search(r'{0}'.format(stoppattern),lines[i])\n if search_stop is not None:\n return output\n pass", "def read(self):\n return self.readentries()", "def get_file_lines(filename):\n\n with open(filename, \"r\") as lines:\n lines = lines.readlines() # Saves list of each poem line in lines\n\n for _ in range(len(lines)):\n lines[_] = lines[_].rstrip() # Removes newline char from right-side end of each poem line\n\n return lines", "def get_processed_content(self, fn):\n fin = open(os.path.join(self.wiki_path, fn), 'rb')\n text = fin.read()\n fin.close()\n return (x for x in gensim.utils.tokenize(text, lowercase=True, deacc=True, errors=\"ignore\") if x not in STOPLIST)", "def read_nq_examples(input_file, tfidf_dict, is_training, args):\n all_examples = []\n positive_paragraphs = 0\n negative_paragraphs = 0\n logger.info(\"Reading: %s\", input_file)\n with open(input_file, \"r\") as f:\n for index, line in tqdm(enumerate(f)):\n new_examples = create_example(line, tfidf_dict, is_training, args)\n if is_training:\n for example in new_examples:\n if example['answer_type'] == AnswerType['UNKNOWN']:\n negative_paragraphs += 1\n else:\n positive_paragraphs += 1\n if index % 5000 == 0:\n print('Positive paragraphs:', positive_paragraphs, 'Negative paragraphs:', negative_paragraphs)\n all_examples.extend(new_examples)\n return all_examples", "def extract_sentences(paper_path, para_yes):\n\n f = open(paper_path, 'rb')\n doc = Document.from_file(f, readers=[HtmlReader()])\n\n sen_yes_arr = list()\n sen_no_arr = list()\n\n elem_all = np.arange(0,len(doc))\n para_no = np.delete(elem_all, para_yes)\n\n for i in para_no:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_no_arr.append(sentence)\n\n for i in para_yes:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_yes_arr.append(sentence)\n\n\n return sen_yes_arr, sen_no_arr", "def protare(self, verbosity=0, lazyParsing=True):\n\n return self.read(verbosity=verbosity, lazyParsing=lazyParsing)", "def get_marked_paragraphs(doc):\n\n\tres = [[x] for x in doc.paragraphs if x.text != ''] # получаем все непустые параграфы\n\n\tfor i in range(len(res)):\n\t\tq = [] # подготавливаем список маркеров\n\t\tfor k in range(len(res[i][0].runs)):\n\t\t\tif \"<>\" in res[i][0].runs[k].text: # если в тексте каретки встречается маркер\n\t\t\t\tq.append(res[i][0].runs[k])\n\t\t\telif \"<\" in res[i][0].runs[k].text and \">\" in res[i][0].runs[k+1].text: # сли маркер разделен на две сосендние каретки\n\t\t\t\tres[i][0].runs[k+1].clear() # удаляем содержимое 
второй каретки\n\t\t\t\tq.append(res[i][0].runs[k]) # и сохраняем в итоговый список первую \n\t\tif q != []: # если найдены маркеры\n\t\t\tres[i].append(q)\n\n\treturn res", "def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def get_pages(self):\n cur_page = Page(\"slide \" + str(self.page_number + 1))\n\n print(self.file)\n sys.exit(1)\n\n for line in self.file:\n line = line.strip()\n\n if line.startswith('--##'):\n pass\n # ignore comments\n elif line.startswith('--newpage'):\n self.pages.append(cur_page)\n self.page_number += 1\n name = line.replace(\"--newpage\", '').strip()\n if name == \"\":\n name = \"slide \" + str(self.page_number + 1)\n\n cur_page = Page(name)\n else:\n cur_page.add_line(line)\n\n self.pages.append(cur_page)\n return self.pages", "def parse(cls, path: str) -> List[QuoteModel]:\n if not cls.can_ingest(path):\n file_extension = path.split('.')[-1]\n raise Exception(f'Cannot ingest {file_extension} exception')\n try:\n quotes = []\n # Create a temporary .txt file\n temp = f'./static/{random.randint(0,100000)}.txt'\n #use pdftotext to read pdf data to the txt file\n subprocess.call(['pdftotext', path, temp])\n with open(temp, 'r') as file:\n for line in file:\n if line == \"\\n\":\n break\n quote_line = line.strip('\\n').strip()\n # 
print(quote_line[0])\n if line != \"\":\n quote_list = quote_line.split(' - ')\n quote = QuoteModel(quote_list[0], quote_list[1])\n quotes.append(quote)\n # Remove the temporary txt file\n os.remove(temp)\n return quotes\n except:\n print('Issue parsing pdf file')", "def readlines(self, cr=1):\n mode = \"r\"\n\n if not cr:\n content = self.read(mode)\n return content.split(\"\\n\")\n else:\n f = self.open(mode)\n try:\n return f.readlines()\n finally:\n f.close()", "def read_pe(self, image_base, size=0):\n # Calculate the size of the in-memory PE\n self.ret = b\"\"\n\n size = self._calc_pe_size(image_base)\n\n if size == 0:\n return self.ret\n\n # Read PE data from IDA database\n self.ret = self.get_bytes(image_base, size)\n return self.ret", "def paragraphs(value):\n paras = re.split(r'[\\r\\n]+', value)\n paras = ['<p>%s</p>' % p.strip() for p in paras]\n return '\\n'.join(paras)", "def read1(cls):\n x_i = \"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.readlines()\n return file", "def read(self):\n return list(self.pile_list)", "def read_file(self, file_descriptor):\n parsers = [pyocr.builders._WordHTMLParser(), pyocr.builders._LineHTMLParser()]\n html_str = file_descriptor.read()\n\n for p in parsers:\n p.feed(html_str)\n if len(p.boxes) > 0:\n last_box = p.boxes[-1]\n if last_box.content == pyocr.util.to_unicode(\"\"):\n # some parser leave an empty box at the end\n p.boxes.pop(-1)\n return p.boxes\n return []", "async def readlines(self, hint=-1):\n if 'b' in self._mode:\n raise APIException(\n \"readline on a binary file is not permitted: {}\".format(\n self._uri)\n )\n # read the entire file in and decode it\n lines = await self.read().decode().split(\"\\n\")\n return lines", "def read_document(self):\n words = self.word_runner()\n word = \"press space to start\"\n orp_ind = 13\n try:\n while True:\n time.sleep(60 / self.wpm)\n\n if self.is_reading:\n word = next(words)\n orp_ind = int(self.orp_index(word))\n\n yield (word, orp_ind)\n except StopIteration:\n pass\n finally:\n del words", "def read(self, filename):\n\n # TODO: use markdown reader to parse the reveal.js markdown\n # github.com/danielfrg/pelican-ipynb/blob/master/markup.py#L62\n reader = MarkdownReader(self.settings)\n md_content, metadata = reader.read(filename)\n\n # TODO: using the markdown reader converts the file contents to HTML,\n # but we just want plain text because pandoc should be converting it\n # instead. The trouble is, we also want to get the metadata\n\n extracmd = \"\"\n\n if \"theme\" in metadata:\n extracmd = extracmd + \" \" + \"--variable theme=%s\" % metadata[\"theme\"]\n\n if \"revealoptions\" in metadata:\n if \"transition\" in metadata[\"revealoptions\"]:\n extracmd = (\n extracmd\n + \" \"\n + \"--variable transition=%s\"\n % metadata[\"revealoptions\"][\"transition\"]\n )\n\n command = f\"pandoc --to revealjs -f markdown {extracmd} {filename}\"\n\n # Define template for Pelican\n metadata[\"template\"] = \"revealmd\"\n\n p = subprocess.Popen(\n command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n try:\n stdout, stderr = p.communicate(str.encode(\"utf8\"))\n except OSError:\n raise RuntimeError(\n 'Pandoc died with exitcode \"%s\" during conversion.' 
% p.returncode\n )\n\n revealjs_content = stdout.decode(\"utf8\")\n\n # Patch revealjs_content to convert 'back' \"{\" and \"}\"\n returntext = revealjs_content.replace(\"%7B\", \"{\").replace(\"%7D\", \"}\")\n\n return returntext, metadata", "def ExtractText(self, selector):\n xpaths = map(self.tree.xpath, selector)\n elements = list(chain.from_iterable(xpaths))\n paragraphs = [e.text_content() for e in elements]\n paragraphs = [s.strip() for s in paragraphs if s and not s == ' ']\n\n return paragraphs", "def get_lines(self):\n return self.split('\\n')", "def contents(self, n, m):\n str = \"\"\n subset = self.getlines(n, m)\n for line in subset:\n str = str + line + \"\\n\"\n return str.rstrip(\"\\n\")", "def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)", "def lines(self):\n return lines(self.startEA, self.endEA)", "def read():\n # TODO", "def split_pdf_file(filename: str) -> List[str]:\n if not filename.endswith('.pdf'):\n raise ValueError('File extension must be pdf')\n\n text_lines: List[str] = []\n with pdfplumber.open(filename) as pdf:\n for page in pdf.pages:\n page = page.extract_text()\n if page:\n text_lines.extend(page.split('\\n'))\n return drop_empty_lines(text_lines)", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def _chunklines(self):\r\n text = self.textwnd.toPlainText()\r\n lines_in_chunk = len(text.split(\"\\n\"))\r\n logger.debug(\"Lines in chunk: {}\".format(lines_in_chunk))\r\n return lines_in_chunk", "def read_file(self) -> PSMList:\n return PSMList(psm_list=[psm for psm in self])", "def readP(path, encoding='iso-8859-1', n=0):\n with open(path, encoding=encoding) as f:\n raw = [x.strip() for x in f if x]\n if n:\n raw = [x for x in raw if len(x) <= n]\n return raw", "def split_text_into_paragraphs(text: str) -> List[str]:\n text_aux = text.strip()\n paragraphs = text_aux.split('\\n\\n') # Strip any leading whitespaces\n\n for p in paragraphs:\n p = p.strip()\n\n return [p.strip() for p in paragraphs if len(p) > 0] # Don't count empty paragraphs", "def readMultipleFileLinesAndPositions(filePath,startPosition=None, bytesToRead=1): \n \n f = open(filePath, 'rb') \n \n if not startPosition is None: \n f.seek(startPosition) \n \n lines = f.readlines(bytesToRead) \n position = f.tell() \n \n f.close() \n \n return lines, position", "def extract_ppt(self, filename):\n prs = Presentation(filename)\n\n sents = []\n for slide in prs.slides:\n for shape in slide.shapes:\n sents.append(shape.text)\n\n text = \"\"\n for sent in sents:\n for bullet in sent.split('\\n'):\n bullstr = bullet.strip()\n if len(bullstr) > 0:\n text += bullstr\n if bullstr[-1] != '.' and bullstr[-1] != '!' 
and bullstr[-1] != '?':\n text += '.'\n text += ' '\n\n return text", "def read(self):\n lines = self.readlines()\n if lines:\n try:\n return ''.join(lines)\n except TypeError:\n return ''.join(force_text(line) for line in lines)\n else:\n return None", "def readlines(self) -> List[bytes]:\r\n buf = []\r\n line = self.readline()\r\n while line:\r\n buf.append(line)\r\n line = self.readline()\r\n return buf", "def load_sentences(path):\n sentences = []\n sentence = []\n num = 0\n with codecs.open(path, 'r', 'utf8') as fread:\n # n_lines = len(fread)\n print(\"Read from {:s}\".format(path))\n # pbar = progressbar.ProgressBar(max_value=n_lines)\n for line_idx, line in enumerate(fread):\n assert line_idx==num,'ER'\n num += 1\n\n line = line.rstrip()\n # print(list(line))\n if not line: #Update: only deal with space between sentences\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:# remove the DOCstart\n sentences.append(sentence)\n sentence = []\n else:\n if line[0] == \" \":#Update: this part is never used in Chinese ner!\n line = \"$\" + line[1:]\n word = line.split()\n # word[0] = \" \"\n else:\n word= line.split()\n assert len(word) >= 2, ([word[0]])\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n\n return sentences", "def read_doc(self):\n self.data_read[:] = []\n for para in self.document.paragraphs:\n text = para.text\n # skip blank lines\n if text.strip():\n # remove duplicated spaces\n text = ' '.join(text.split())\n # for older versions of final CAPA's\n self.fill_project_info(text, new_format=False)\n self.data_read.append(text)\n\n # Constant in old & new report format\n # Batch/Project name\n # Lead(s)'s name\n # Reported date\n for i in range(0, len(self.data_read)):\n if next((x for x in self.leads if x in self.data_read[i]), None):\n self.project_info.update({'Project Name': self.data_read[i - 1]})\n self.project_info.update({'Lead(s)': self.data_read[i]})\n self.project_info.update({'Date Reported': self.data_read[i + 1]})\n break", "def _iter_from_disk(self):\n self.f.seek(0, 0) # relative to start\n for line in self.f:\n yield line\n self.f.seek(0, 2) # relative to end", "def read_txt(path):\n \n with open(path, \"r\") as f:\n return f.read().splitlines()", "def readInputFileElements(inputFileName, sentence=True, L=None):\n print(\"Read by elements (per sentences/paragraphs) in a document, each of these elements is grouped by document.\")\n data = []\n f = open(inputFileName, 'r', encoding='utf8')\n for line in f:\n if L is not None:\n line = line[:L] # remove title?\n paragraphs = line.strip().split(EOP)\n for paragraph in paragraphs:\n if sentence:\n for sent in paragraph.split(SNT):\n data.append(sent)\n else:\n data.append(paragraph)\n return data", "def browse(self):\n res = \"PID[\" + str(PID) + \"] \"\n for (start, offset) in \\\n self.__global_index[self.__start_index: self.__start_index + self.__nb_segs]:\n seg = Segment(self.__content[start:start + offset])\n res = res + \"\\n \" + str(seg)\n return res", "def parse_document(file):\n lines = file.read_text(encoding='utf-8').split('\\n')\n # If the \"#\" character is present, it means the line contains the\n # document original link. 
So, if the # is not present,\n # we have a normal paragraph to append to the list.\n return [line for line in lines if line != '' and '#' not in line]", "def segment_paragraphs(root_el, cites=[]):\n from capdb.models import Citation\n\n last_el_ends_mid_sentence = False\n join_with_last_el = False\n html_to_prepend_to_next_el = ''\n\n # build a lookup like {\"935 F.3d\": 1, \"123 Mass.\": 2}\n reporter_indexes = {}\n for i, cite in enumerate(Citation.sorted_by_type(cites)):\n eyecite_cite = next(extract_citations_from_text(cite.cite), None)\n if eyecite_cite:\n volume = eyecite_cite.groups['volume']\n reporter = eyecite_cite.groups['reporter']\n reporter_indexes[f\"{volume} {reporter}\"] = i+1\n\n # special case -- \"[134 Hawai'i 89]\" is a page number for \"134 Haw. 86\"\n if reporter == 'Haw.':\n reporter_indexes[f\"{volume} Hawai'i\"] = i + 1\n\n # process each paragraph\n for el_pq in PyQuery(root_el)('root').children().items():\n el = el_pq[0]\n if el.tag == 'header-end':\n continue\n\n html = inner_html(el)\n page_label = None\n exact_match = False\n index = 1\n\n # clean el whitespace\n clean_html = re.sub(r'\\s+|^<br>|<br>$', ' ', html).strip()\n if not clean_html:\n el_pq.remove()\n continue\n\n # strip tags to handle examples like\n # \"<p><strong>[16 N.Y.3d 274] <strong> <p/></strong></strong> <p> <strong> [945 N.E.2d 484]</strong> </p> <p> <strong>OPINION OF THE COURT</strong> </p></p>\"\n # in NE2d/945/945ne2d484.xml\n html_no_tags = strip_tags(clean_html).strip()\n\n # check for 'Page 123'\n m = re.match(r'Page (\\d+)$', html_no_tags)\n if m:\n page_label = make_page_label(m[1])\n exact_match = True\n\n # check for '[123 Mass. 456]'\n else:\n m = re.search(r\"\\[(?P<volume>\\d+) (?P<reporter>[A-Z][A-Za-z0-9 .']+) (?P<page>\\d+)\\]\", html_no_tags)\n if m:\n vol_reporter = f\"{m['volume']} {m['reporter']}\"\n if vol_reporter in reporter_indexes:\n index = reporter_indexes[vol_reporter]\n is_valid_reporter = True\n else:\n is_valid_reporter = False\n exact_match = m[0] == html_no_tags\n if exact_match or is_valid_reporter:\n page_label = make_page_label(m['page'], index)\n\n # handle page label found\n if page_label:\n clean_html = clean_html.replace(escape(m[0]), page_label)\n\n if exact_match:\n if last_el_ends_mid_sentence:\n join_with_last_el = True\n html_to_prepend_to_next_el += clean_html\n el_pq.remove()\n continue\n\n if html_to_prepend_to_next_el:\n clean_html = html_to_prepend_to_next_el + clean_html\n html_to_prepend_to_next_el = ''\n\n if join_with_last_el:\n join_with_last_el = False\n prev_el = el_pq.prev()\n if prev_el[0].tag == el_pq[0].tag:\n prev_el.append(('' if prev_el.text().endswith('-') else ' ')+clean_html)\n el_pq.remove()\n continue\n\n last_el_ends_mid_sentence = bool(mid_sentence_re.search(html_no_tags))\n\n if clean_html != html:\n el_pq.html(clean_html)", "def read(*p):\n with open(os.path.join(*p), 'r') as fi:\n return fi.read()", "def lines(self):\n return self.lines", "def _readline_ins(self):\n if self._ins_filehandle is None:\n if not os.path.exists(self._ins_filename):\n raise Exception(\n \"instruction file '{0}' not found\".format(self._ins_filename)\n )\n self._ins_filehandle = open(self._ins_filename, \"r\")\n line = self._ins_filehandle.readline()\n self._ins_linecount += 1\n if line == \"\":\n return None\n self._last_line = line\n # check for spaces in between the markers - this gets ugly\n line = line.lower()\n if self._marker is not None and self._marker in line:\n\n # def find_all(a_str, sub):\n # start = 0\n # while True:\n # 
start = a_str.find(sub, start)\n # if start == -1:\n # return\n # yield start\n # start += len(sub)\n # poss speedup using regex\n midx = [m.start() for m in re.finditer(re.escape(self._marker), line)]\n # midx = list(find_all(line, self._marker))\n midx.append(len(line))\n first = line[: midx[0]].strip()\n tokens = []\n if len(first) > 0:\n # tokens.append(first)\n tokens.extend([f.strip() for f in first.split()])\n for idx in range(1, len(midx) - 1, 2):\n mstr = line[midx[idx - 1] : midx[idx] + 1]\n ostr = line[midx[idx] + 1 : midx[idx + 1]]\n tokens.append(mstr)\n tokens.extend(ostr.split())\n else:\n tokens = line.strip().split()\n return tokens", "def get_file_content(self):\n after = self.file.revisions.filter(\n revision_number__gt=self.revision_number)\n content = self.file.content\n\n for revision in after:\n patch = dmp.patch_fromText(revision.diff)\n content = dmp.patch_apply(patch, content)[0]\n\n return content", "def read_input():\n return Path(__file__).with_name('input.txt').read_text().splitlines()", "def getLines(self, **cmdKwargs):\n assert 'args' in cmdKwargs;\n assert len(cmdKwargs.keys())==1;\n import subprocess;\n popen = subprocess.Popen(args=cmdKwargs['args'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n lines = iter(popen.stdout.readline, b\"\") \n return lines;", "def __init__(self, number, title, paragraphs):\n self.number = number\n self.title = title\n self.paragraphs = []\n for paragraph_lines in paragraphs:\n new_pragraph = Paragraph.Paragraph(paragraph_lines)\n self.paragraphs.append(new_pragraph)", "def get_formatted_docs(pages, paragraph_size = 0.33):\r\n formatted_docs = {}\r\n paragraph_page_idxs = {}\r\n paragraphs = []\r\n for page_num in pages.keys():\r\n page = pages[page_num]\r\n page = re.sub('-[\\n\\r\\t\\s]+', '', page) # words broken by line break\r\n page = re.sub('[\\n\\r\\t\\s]+', ' ', page) # remove line break, tabs, whitespaces\r\n # build paragraphs\r\n page = page.split()\r\n k = int(len(page)*paragraph_size)\r\n if k < 1:\r\n paragraphs += [(page_num, ' '.join(page))]\r\n else:\r\n paragraphs += [(page_num,' '.join(page[i:i+k])) for i in range(0, len(page), k)]\r\n for i in range(len(paragraphs)):\r\n formatted_docs[i] = paragraphs[i][1]\r\n paragraph_page_idxs[i] = paragraphs[i][0]\r\n return (formatted_docs, paragraph_page_idxs)" ]
[ "0.6528536", "0.64675736", "0.6323226", "0.6145701", "0.61151904", "0.6101688", "0.6096735", "0.60733026", "0.5850335", "0.5826958", "0.57282406", "0.57275575", "0.5669418", "0.5661601", "0.5554305", "0.5551181", "0.55444384", "0.547488", "0.5394992", "0.53804976", "0.5364042", "0.53162694", "0.5299351", "0.52904165", "0.5289129", "0.52817494", "0.5271727", "0.517947", "0.5161774", "0.51570076", "0.5156572", "0.5143503", "0.5139062", "0.5119693", "0.51177603", "0.51168", "0.5111047", "0.51092243", "0.5109058", "0.50860184", "0.50820774", "0.5079469", "0.50789297", "0.5068163", "0.50509685", "0.5032105", "0.50214", "0.49848148", "0.49683863", "0.49537292", "0.495164", "0.4943573", "0.4943192", "0.49351722", "0.49204174", "0.49171105", "0.49170965", "0.49161336", "0.49152628", "0.4911845", "0.49118334", "0.49105275", "0.4906811", "0.49048516", "0.49032578", "0.49007195", "0.48971733", "0.48944598", "0.4877026", "0.48764524", "0.4860325", "0.4857185", "0.48527297", "0.4849994", "0.4837809", "0.4837809", "0.48291925", "0.4825891", "0.48226595", "0.4813556", "0.48025367", "0.4801826", "0.4800526", "0.4798738", "0.47978425", "0.47968304", "0.47921297", "0.47912142", "0.47850674", "0.47719395", "0.47689766", "0.47667423", "0.47663492", "0.47526902", "0.4739199", "0.47305804", "0.47300282", "0.4729811", "0.47278848", "0.47272182" ]
0.5878271
8
Returns an iterator of sentences being read from disk.
def get_sentences(self, batch=None):
    # loop through the paragraph stream for this document database
    for paragraph in self.get_paragraphs(batch):
        # loop through the sentences
        for sentence in paragraph["sentences"]:
            # yield the individual tokens
            yield sentence["tokens"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_sentences(path):\n sentences = []\n sentence = []\n num = 0\n with codecs.open(path, 'r', 'utf8') as fread:\n # n_lines = len(fread)\n print(\"Read from {:s}\".format(path))\n # pbar = progressbar.ProgressBar(max_value=n_lines)\n for line_idx, line in enumerate(fread):\n assert line_idx==num,'ER'\n num += 1\n\n line = line.rstrip()\n # print(list(line))\n if not line: #Update: only deal with space between sentences\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:# remove the DOCstart\n sentences.append(sentence)\n sentence = []\n else:\n if line[0] == \" \":#Update: this part is never used in Chinese ner!\n line = \"$\" + line[1:]\n word = line.split()\n # word[0] = \" \"\n else:\n word= line.split()\n assert len(word) >= 2, ([word[0]])\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n\n return sentences", "def get_enron_sentences(self):\n helper._print_subheader('Reading ' + directories.ENRON_TRAIN_SENTENCES_TXT_PATH + '...')\n with open(directories.ENRON_TRAIN_SENTENCES_TXT_PATH, 'r', encoding='utf-8') as txt_file:\n for index, line in enumerate(txt_file):\n if index % 1000000 == 0 and index != 0:\n helper._print(f'{index} sentences read')\n break\n preproccesed_line = simple_preprocess(line)\n if preproccesed_line != []:\n yield preproccesed_line\n helper._print(f'{index} sentences read')\n helper._print_subheader('Done reading Enron email data!')", "def get_plain_sentences(self, type):\n if type == \"translation\":\n fn = self.translationfile()\n elif type == \"source\":\n fn = self.sourcefile()\n elif type == \"reference\":\n fn = self.referencefile()\n else:\n raise ValueError\n with open(fn, \"r\") as f:\n lines = f.readlines()\n return lines", "def open_text_file(filepath):\n sentences = []\n sentencemanager = nmea.NMEASentenceManager()\n for line in open_file_generator(filepath):\n sentencemanager.process_sentence(line)\n sentences.append(line)\n return sentencemanager, sentences", "def readWhole(self):\n try:\n if os.path.isfile(self.filename) == False:\n raise dse.DocumentStreamError(\"Not a file!\")\n except dse.DocumentStreamError as E:\n print(E.data)\n exit()\n\n f = open(self.filename, 'r')\n\n fileString = f.read()\n f.close()\n\n #fileString = [c for c in fileString if c not in ['\\n', '\\t']] # Remove all returns in the string\n\n sentenceList = []\n sent = ''\n spaceState = False\n\n\n ### If char is .!?; or new line, append sentence to sentenceList\n ### and reset sentence to empty string.\n\n for char in fileString:\n if char in ['\\n', '\\t']:\n char = ' '\n\n if char == ' ':\n if spaceState == True and sent != '':\n sentenceList.append(sentence.Sentence(sent))\n sent = ''\n elif spaceState == False:\n sent += char\n spaceState = True\n else:\n spaceState = False\n sent += char\n if char in '.!?;' and sent != '':\n sentenceList.append(sentence.Sentence(sent))\n sent = ''\n\n if sent != '':\n sentenceList.append(sentence.Sentence(sent))\n\n ### Handles the case that a sentence begins or ends with a space character.\n '''\n for i in sentenceList:\n if i.sentence[0] == ' ':\n i = sentence.Sentence(i.sentence[1:])\n if i.sentence[-1] == ' ':\n i = sentence.Sentence(i.sentence[:-1])\n '''\n\n return sentenceList", "def read_file(input_file):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n sentences = f.read().splitlines()\n return sentences", "def read_article_2(filename):\n file = open(filename, \"r\")\n filedata = file.readlines()\n sentences = 
sent_tokenize(filedata[0])\n return sentences", "def _read_sentences(filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n return [sentence.split() for sentence in f.read().split('\\n')]", "def extract_sentences_indexed_files(self):\n pass", "def load_sentences(path, zeros):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def load_sentences(path, lower, zeros=True):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) >= 2\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def gather_sentences(self):\n sentences = Sentence.objects.all()\n return sentences", "def sentences_from_file(this_class, filename):\n # Note that the native method below leaks. We work around this\n # by acquiring its pointer in __init__\n sentReps = parser.sentRepsFromFile(filename)\n return list(map(this_class, sentReps))", "def load_data_sentences(dirname):\n sentence_list = []\n for fname in os.listdir(dirname):\n with open(os.path.join(dirname, fname)) as file:\n #sentence_list.append(gensim.models.word2vec.LineSentence(file))\n sentence_list.append(file)\n return sentence_list", "def read_in_files():\n\n num_files = len([name for name in os.listdir(DATA_SOURCE) if name.endswith(\".txt\")])\n loading_section_size = num_files / 30\n count = 0\n\n sentences_as_lists = []\n for filename in os.listdir(DATA_SOURCE):\n if filename.endswith(\".txt\"):\n\n # Pretty loading bar\n print(\"Processing Files: [\", end=\"\")\n for i in range(31, -1, -1):\n if count > i * loading_section_size:\n for j in range(0, i):\n print(\"-\", end=\"\")\n sys.stdout.flush()\n for j in range(i, 30):\n print(\" \", end=\"\")\n sys.stdout.flush()\n break;\n if count == num_files:\n print(\"] \", count, end=\"\\n\")\n else:\n print(\"] \", count, end=\"\\r\")\n sys.stdout.flush()\n\n # Open the paper\n paper_to_open = DATA_SOURCE + filename\n paper = Reader().open_file_single_string(paper_to_open)\n udata = paper.decode(\"utf-8\")\n paper = udata.encode(\"ascii\", \"ignore\")\n\n # Split the data into a list of sentences, where each sentence is a list of words\n sentences = sent_tokenize(paper)\n\n for sentence in sentences:\n words = word_tokenize(sentence)\n sentences_as_lists.append(words)\n\n if DEBUG:\n print(sentences_as_lists)\n wait()\n\n count += 1\n\n return sentences_as_lists", "def read_sentences():\r\n f = open(\"data.txt\", \"r\")\r\n gram = f.read().splitlines()\r\n gram = [sentence for sentence in gram if sentence != \"\"]\r\n return gram", "def sents(self):\n\n text = str()\n for file in os.listdir(self.path):\n # checks if the given path contains a text file and opens it\n if file.endswith(\".txt\"):\n with open(self.path + \"/\" + file) as connection:\n text += connection.read()\n\n # tokenizes the text to sentences and tokenizes the tokenized sentences to words\n sentences_list = 
nltk.sent_tokenize(text)\n word_list = [nltk.word_tokenize(sent) for sent in sentences_list]\n\n return word_list", "def generate_sentences(self, count=5):\n\n with self.open_text_data() as f:\n result = self.read_sentences(f, count=count)\n return result", "def tokenize(self, path):\n assert os.path.exists(path)\n with open(path, 'r') as f:\n sentences = []\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences.append(sentence.split())\n self.data = sentences", "def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences", "def extract_sentences(file_path):\n\n with open(file_path, \"r\") as file:\n\n lines = list()\n\n for line in file:\n line_stripped = line.strip()\n\n if line_stripped == \"\":\n continue\n\n lines.append(line_stripped)\n\n text = \" \".join(lines)\n sentences = token_to_sentence(text)\n\n return sentences", "def get_sentences(self):\n return [s for s in self.text.split('\\n')]", "def getSentences(self, ser):\n\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n sentencesSer = ser.apply(tokenizer.tokenize)\n return sentencesSer", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def _split_into_sentences(self):\n EOL_CHARS = \".!?\"\n allSentences = []\n with open(self.filename, 'r') as f:\n sentence = []\n for line in f:\n try:\n word, label = line.split()\n except:\n continue\n #no the end of the sentence\n sentence += [word]\n intLabel = 1 if label == \"PERSON\" else 0\n self.labels += [intLabel]\n if word in EOL_CHARS:\n allSentences += [sentence]\n sentence = []\n\n #in case the last sentence doesn't end with proper punctuation!\n if sentence != []:\n allSentences += [sentence]\n self.allSentences = allSentences", "def get_sample_sent(file_path,sent_number):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n \n with codecs.open(file_path, encoding='utf_8') as f:\n for sentence in it.islice(f,sent_number, sent_number+1):\n return sentence.replace('\\n', '')", "def _load_sentence_list(self, path):\n\n result = {}\n\n for entry in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=3):\n if self.include_languages is None or entry[1] in self.include_languages:\n result[entry[0]] = entry[1:]\n\n return result", "def read_sentences(snt_file):\n\n sentences = open(snt_file).readlines()\n em = []\n en = []\n i = 0\n\n for line in sentences:\n line = line.split()\n if line[0] != '1':\n em_snt = []\n en_snt = []\n if i % 3 == 1:\n for word in line:\n en_snt += [int(word)]\n en.append(en_snt)\n elif i % 3 == 2:\n for word in line:\n em_snt += [int(word)]\n em.append(em_snt)\n i += 1\n return em, en", "def get_sentences(self):\n return 
self._sentences", "def make_sentences(self):\n\n if self.document == None:\n return\n\n sent = sent_tokenize(self.document) # contains raw sentences\n\n\n # Create parameters for NER and Dependency Parsing a\n # and pass it to the sentence objcet\n\n # set config file\n config = CP.RawConfigParser()\n config = config\n config.read('config.py')\n\n # Server for dependency parsing\n\n server = ServerProxy(JsonRpc20(),TransportTcpIp(addr=(\"127.0.0.1\", 8080), timeout=200.0))\n\n # Parameters for Named entitye recognition\n\n # get the classifier and tagger location from config file\n tagger = config.get('NER','tagger') # gets the path of the stanford tagger\n classifier = config.get('NER','classifier') # gets the path of the stanford classifier\n st = StanfordNERTagger(classifier,tagger)\n for i in range(len(sent)):\n s = Sentence(sent[i],i,server, st, 'test')\n self.sentences.append(s)", "def gather_sentences(self):\n if self.company_name:\n companies = Company.objects.filter(name__contains=self.company_name)\n dpefs = DPEF.objects.filter(company__in=companies)\n sentences = Sentence.objects.filter(dpef__in=dpefs).all()\n else:\n sentences = Sentence.objects.none() # TODO: can be set to none later when all works\n return sentences", "def sentences_for_dir(path='./',separate=True,gzipped=True):\n for filename in cowfiles(path):\n for metadata, data in sentence_generator(filename,separate,gzipped):\n yield metadata, data", "def get_corpus():\n all_text = []\n\n for _, _, files in os.walk(DATA_DIRECTORY):\n for f in files:\n with open(os.path.join(DATA_DIRECTORY, f), 'r') as article:\n # Quotation marks rarely come out as pairs in finished chains.\n # So we remove them before adding the article text:\n all_text.append(re.sub(r'[„“]', '', article.read()))\n\n return markovify.Text(\"\".join(all_text), state_size=2)", "def read_file(path, tok=False):\n with open_file(path) as f:\n for line in f.readlines():\n words = split_sentence(line.strip(), tok)\n yield words", "def get_sentences(text):\n \n return text.split('.')", "def read_article(file_name):\n file = open(file_name, \"r\")\n filedata = file.readlines()\n sentences = [] \n for sentence in filedata:\n print(\"\\n{} text: \\n{}\".format(file_name,sentence))\n sentences.append(sentence.replace(\"[^a-zA-Z]\", \" \").split(\" \")) # filter charachter only\n \n return sentences", "def tokenize(self, path):\n dropped = 0\n with open(path, 'r') as f:\n linecount = 0\n lines = []\n for line in f:\n linecount += 1\n if self.lowercase:\n words = line[:-1].lower().strip().split(\" \")\n else:\n words = line[:-1].strip().split(\" \")\n if len(words) > self.maxlen:\n dropped += 1\n continue\n words = ['<sos>'] + words\n words += ['<eos>']\n # vectorize\n vocab = self.dictionary.word2idx\n unk_idx = vocab['<oov>']\n indices = [vocab[w] if w in vocab else unk_idx for w in words]\n lines.append(indices)\n\n print(\"Number of sentences dropped from {}: {} out of {} total\".\n format(path, dropped, linecount))\n return lines", "def __iter__(self):\n try:\n # Assume it is a file-like object and try treating it as such\n # Things that don't have seek will trigger an exception\n self.source.seek(0)\n for item_no, line in enumerate(self.source):\n yield LabeledSentence(utils.to_unicode(line).split(), ['SENT_%s' % item_no])\n except AttributeError:\n # If it didn't work like a file, use it as a string filename\n with utils.smart_open(self.source) as fin:\n for item_no, line in enumerate(fin):\n yield LabeledSentence(utils.to_unicode(line).split(), ['SENT_%s' % 
item_no])", "def reader(self):\n @contextmanager\n def generator(data):\n \"\"\"\n Args:\n data (str): could be a filename or the text to tokenize.\n Returns:\n a context manager that can be used in a `with` contruct,\n yielding each line of the tokenized `data`.\n \"\"\"\n if not os.path.exists(data):\n yield self.format(self.predict(data))\n else:\n with open(data) as f:\n yield self.format(self.predict(f.read()))\n return generator", "def read(self):\n dataset = Dataset()\n\n file_list = glob.glob(str(self.directory + \"/*.txt\"))\n\n for file_path in file_list:\n file_name = os.path.basename(file_path)\n\n docid, partid_prefix, = file_name.replace('.txt', '').split('-', 1)\n # partid_prefix not complete due to multiple part cration for a single .txt file\n\n if 'Abstract' in partid_prefix:\n is_abstract = True\n else:\n is_abstract = False\n\n with open(file_path, encoding='utf-8') as file:\n text_raw = file.read()\n\n text = text_raw.replace('** IGNORE LINE **\\n', '')\n paragraph_list = text.split('\\n\\n')\n\n # inital offset for raw_text\n tot_offset = text_raw.count('** IGNORE LINE **\\n') * 18\n offsets = [tot_offset]\n\n for i, text_part in enumerate(paragraph_list):\n # if text is empty (usually last text due to splitting of \"\\n\\n\")\n if text_part != \"\":\n partid = \"{}-p{}\".format(partid_prefix, i + 1)\n\n if docid in dataset:\n dataset.documents[docid].parts[partid] = Part(text_part, is_abstract=is_abstract)\n else:\n document = Document()\n document.parts[partid] = Part(text_part, is_abstract=is_abstract)\n dataset.documents[docid] = document\n\n # add offset for next paragraph\n tot_offset += len(text_part) + 2\n offsets.append(tot_offset)\n\n # to delete last element\n del offsets[-1]\n\n # annotations\n with open(file_path.replace('.txt', '.ann'), encoding='utf-8') as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n if row[0].startswith('T'):\n entity_type, start, end = row[1].split()\n start = int(start)\n end = int(end)\n text = row[2]\n\n partid = None\n part_index = None\n\n for i in range(len(offsets) - 1):\n if offsets[i+1] > start:\n part_index = i\n break\n\n if part_index is None:\n part_index = len(offsets) - 1\n\n partid = \"{}-p{}\".format(partid_prefix, part_index + 1)\n real_start = start - offsets[part_index]\n real_end = end - offsets[part_index]\n calc_ann_text = document.parts[partid].text[real_start : real_end]\n\n if calc_ann_text != text:\n print(\" ERROR\", docid, part_index, partid, start, offsets, real_start, \"\\n\\t\", text, \"\\n\\t\", calc_ann_text, \"\\n\\t\", document.parts[partid].text)\n\n if entity_type == 'mutation':\n ann = Entity(self.mut_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n elif entity_type == 'gene':\n ann = Entity(self.gene_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n return dataset", "def read_sentences(f):\n with open(f, 'r') as conll_file:\n s = [ROOT]\n for line in conll_file:\n if line.strip() and not line.startswith('#'):\n s.append(read_token(line))\n elif len(s) != 1:\n yield s\n s = [ROOT]\n if len(s) != 1: # file ended without a new line at the end\n yield s", "def readFile(self,filepath):\n logger.info(\"reading \"+filepath)\n data_file = Pickle_Helper(filepath)\n\n all_data = data_file.load()[\"data\"]\n skipped = 0\n for data in all_data:\n if self.read_sentence(data):\n pass\n else:\n skipped = skipped + 1\n logger.warn(\"{} has non relations 
existed\".format(data[\"example_id\"]))\n\n logger.info((\"done reading {}, {} sentences processed, {} is skipped because of no relation\").format(filepath, str(len(all_data)), skipped))\n return len(all_data)", "def tokenize(self, path):\n assert os.path.exists(path)\n # add the start of sentence token\n sentence_sep = [BOS]\n with open(path, 'r') as f:\n sentences = [BOS]\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences += sentence.split() + sentence_sep\n # split into list of tokens\n self.data = sentences", "def get_kindle_sentences() -> List[str]:\n reviews = dl.load_json_from_file(\"../data/kindle_500.json\")\n return [sent[1] for review in reviews for sent in enumerate(review)]", "def sentences(self, tag=False, tag_method=None):\n self.__set_text_node(self.root_)\n sentence_nodes = filter(lambda n: n.nodeType == n.ELEMENT_NODE and n.tagName == 's',\n list(self.text_node.childNodes))\n sentences = []\n for s in sentence_nodes:\n current = []\n TimeMLDoc.__get_text(s, current, False)\n #print(current)\n if not tag:\n sentences.append(''.join([ c[0] for c in current]))\n else:\n sentences.append(tag_method(current))\n return sentences", "def sents(self, fileids=None, categories=None):\n for paragraph in self.paras(fileids, categories):\n for sentence in sent_tokenize(paragraph, language='russian'):\n yield sentence", "def sentencing(any_text, nlp):\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n doc = nlp(any_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n return sentences", "def get_processed_content(self, fn):\n fin = open(os.path.join(self.wiki_path, fn), 'rb')\n text = fin.read()\n fin.close()\n return (x for x in gensim.utils.tokenize(text, lowercase=True, deacc=True, errors=\"ignore\") if x not in STOPLIST)", "def get_processed_sentences(self):\n\n self.__resolve_coreference()\n self.__tokenize_sentences()\n\n return self.tokenized_sentences", "def load_collection_sentences(collection, n):\n files = os.listdir(collection)\n files_sentences = []\n for f in files:\n files_sentences.append(load_file_sentences(collection + \"/\" + f,f))\n n -= 1\n if n == 0:\n break\n return files_sentences", "def get_sentences(self):\n for tree in self.tree_generator():\n yield tree[\"title\"] + \" \" + tree[\"selftext\"]\n for _, comment in tree[\"comments\"].items():\n yield comment[\"body\"]", "def collect_sentences(self):\n sentences = []\n for document in self.documents:\n for sentence_token in document.sentences:\n sentences.append(sentence_token)\n return sentences", "def read_data(max_size=None, max_sentence_size=None, min_sentence_size=10):\n sentences = []\n with tf.gfile.GFile('data_WMT/sentences/sentences.txt', mode=\"r\") as source_file:\n source = source_file.readline()\n print (source)\n counter = 0\n while source and (not max_size or counter < max_size):\n source_ids = [int(x) for x in source]\n if len(source_ids) < max_sentence_size and len(source_ids) > min_sentence_size:\n sentences.append(source_ids)\n ratings.append(rating)\n counter += 1\n if counter % 10000 == 0 and counter != 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n source = source_file.readline()\n return sentences", "def generate_sentences(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):\n if train_path:\n generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)\n # generate_sentences.detector = SentenceDetector(text=text, nocase=not 
case_sensitive, epochs=epochs, classifier=classifier)\n return iter(generate_sentences.detector.segments(text))", "def sentences(self) -> List[str]:\n\t\treturn [sentence for sentence in re.split('(?<=[.!?])', self.text)]", "def __iter__(self):\n try:\n # Assume it is a file-like object and try treating it as such\n # Things that don't have seek will trigger an exception\n self.source.seek(0)\n for line in itertools.islice(self.source, self.limit):\n line = utils.to_unicode(line).split()\n i = 0\n while i < len(line):\n yield line[i: i + self.max_sentence_length]\n i += self.max_sentence_length\n except AttributeError:\n # If it didn't work like a file, use it as a string filename\n with utils.smart_open(self.source) as fin:\n for line in itertools.islice(fin, self.limit):\n line = utils.to_unicode(line).split()\n i = 0\n while i < len(line):\n yield [self.sos_token] + line[i: i + self.max_sentence_length - 2] + [self.eos_token]\n i += self.max_sentence_length", "def loadTextFile(self):\n if self.tempFilePath is None or not MyFile.checkFileExists(self.tempFilePath):\n raise Exception(\"Temporary text file does not exist!\")\n\n io = Ioread()\n self.sentencesList = io.readFileContentList(self.tempFilePath)", "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []", "def extract_sentences_from_file(path: str) -> list:\n sentences = list()\n with io.open(file=path, mode=\"r\", encoding=\"utf-8\") as input_file:\n content = json.load(input_file)\n\n data = content[\"rasa_nlu_data\"]\n\n # Obtain the list of sentences\n common_examples = data[\"common_examples\"]\n\n for example in common_examples:\n sentences.append(example[\"text\"])\n\n return unique(sentences)", "def readFileToCorpus(f):\n if os.path.isfile(f):\n file = open(f, \"r\") # open the input file in read-only mode\n i = 0 # this is just a counter to keep track of the sentence numbers\n corpus = [] # this will become a list of sentences\n print(\"Reading file \", f)\n for line in file:\n i += 1\n sentence = line.split() # split the line into a list of words\n #append this lis as an element to the list of sentences\n corpus.append(sentence)\n if i % 1000 == 0:\n #print a status message: str(i) turns int i into a string\n #so we can concatenate it\n sys.stderr.write(\"Reading sentence \" + str(i) + \"\\n\")\n #endif\n #endfor\n return corpus\n else:\n #ideally we would throw an exception here, but this will suffice\n print(\"Error: corpus file \", f, \" does not exist\")\n sys.exit() # exit the script\n #endif", "def tokenize(self, path, training_set=False):\n assert os.path.exists(path)\n with open(path, encoding='utf8') as fin:\n num_lines = sum(1 for _ in fin.readlines())\n with open(path, 'r', encoding=\"utf8\") as f:\n words = []\n for i, line in enumerate(tqdm(f, total=num_lines)):\n if self.max_lines > 0 and i > self.max_lines:\n break\n line = line.strip()\n if not line:\n continue # Skip empty lines.\n elif line.startswith('='):\n continue # 
Skip headers.\n else:\n sentence = (self.order - 1) * [SOS] + \\\n [process(word, self.lower) for word in line.split()] + [EOS]\n if training_set:\n words.extend(sentence)\n self.vocab.update(sentence)\n else:\n sentence = [word if word in self.vocab else UNK for word in sentence]\n words.extend(sentence)\n return words", "def lines(self):\n\t\tlines = []\n\t\tfor t in self.trigrams:\n\t\t\tfor line in t.lines:\n\t\t\t\tlines.append(line)\n\t\treturn lines", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n lines_c = 0\n for line in f:\n words = ['<start>'] + line.split() + ['<eos>']\n len_ = len(words)\n tokens += len_\n if(self.max_sent_length <len_): self.max_sent_length = len_\n lines_c+=1\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n #print('Creating tensor of size: ', lines_c, self.max_sent_length)\n print('Reading files: ', path)\n ids = [] # torch.LongTensor(lines_c, self.max_sent_length)\n target_vecs = [] # torch.LongTensor(lines_c, self.max_sent_length)\n line_c = 0\n count =0\n for line in f:\n words = ['<start>'] + line.split() + ['<eos>']\n sentence_len = len(words)\n if(sentence_len>self.max_length): \n #print (\"sen len: \", sentence_len, ' exceed limit: ', self.max_length, ' skipped!!', count)\n count+=1\n continue\n ids.append([])\n target_vecs.append([])\n #if(self.max_sent_length<sentence_len): self.max_sent_length = sentence_len\n token = 0\n for word in words:\n if(token<sentence_len-1 ): ids[line_c].append( self.dictionary.word2idx[word])\n if(token>0): target_vecs[line_c].append( self.dictionary.word2idx[word] )\n token += 1\n \n line_c +=1\n\n return ids, target_vecs", "def read(self, content: str):\n documents = []\n # 1. Split the text in documents using string '-DOCSTART- -X- O O' and loop over it\n content = content.split('-DOCSTART- -X- O O')\n for doc in content:\n if doc != '':\n words = []\n sentences = []\n labels = []\n start = 0\n # 2. Split lines and loop over\n str_sentences = doc.split('\\n\\n')\n # 3. Make vectors of tokens and labels (colunn 4) and at the '\\n\\n' make a sentence\n for sentence in str_sentences:\n if sentence != '':\n tokens = sentence.split('\\n')\n for token in tokens:\n if ' ' in token :\n cols = token.split(' ')\n words.append(cols[0])\n labels.append(cols[1])\n sentences.append(Sentence(doc, start, start+len(tokens)))\n start += len(tokens)\n # 4. 
Create a Document object\n documents.append(Document.create_from_vectors(words, sentences, labels))\n\n return documents", "def current_document_text(self):\n return self.current_document.lines", "def read_conllu(path: Text) -> Iterator[Sequence[Dep]]:\n\n #Initializing variables\n sentence=[]\n words=[]\n none_val=None\n #Reading the file\n with open(path,\"r\",encoding=\"utf-8\") as f:\n for line in f:\n #Parsing sentence by sentence\n if not line.strip().startswith('#') and line.strip():\n lines=line.strip()\n id=lines.split('\\t')[0]\n if lines.split('\\t')[1].startswith('_'):\n form=None\n else:\n form=lines.split('\\t')[1]\n if lines.split('\\t')[2].startswith('_'):\n lemma=None\n else:\n lemma=lines.split('\\t')[2]\n upos=lines.split('\\t')[3]\n if lines.split('\\t')[4].startswith('_'):\n xpos=None\n else:\n xpos=lines.split('\\t')[4]\n if lines.split('\\t')[5].startswith('_'):\n feats=[]\n else:\n feats=lines.split('\\t')[5].split('|')\n if lines.split('\\t')[6].startswith('_'):\n head=None\n else:\n head=lines.split('\\t')[6]\n if lines.split('\\t')[7].startswith('_'):\n deprel=None\n else:\n deprel=lines.split('\\t')[7]\n deps=lines.split('\\t')[8].split('|')\n if lines.split('\\t')[9].startswith('_'):\n misc=None\n else:\n misc=lines.split('\\t')[9]\n words.append(Dep(id,form,lemma,upos,xpos,feats,head,deprel,deps,misc))\n #Append at end of Sentence\n elif len(line.strip())==0:\n sentence.append(words)\n words=[]\n return iter(sentence)", "def read_sents_from_file(f):\r\n sent = []\r\n for line in f:\r\n line = line.strip()\r\n if line == \"\":\r\n if sent != []:\r\n yield 'SENT', Sentence(sent)\r\n sent = []\r\n elif line.startswith('#'):\r\n yield 'COMMENT', line\r\n else:\r\n sent.append(line)\r\n if sent != []:\r\n yield 'SENT', Sentence(sent)\r\n f.close()", "def get_sentence_list_for_word_file(file_path: str) -> List[str]:\n # get file data\n with open(file_path, 'r') as review_file:\n file_text = review_file.read().splitlines()\n return file_text", "def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):\n i = 0\n p = Progress()\n for path in paths:\n with open(path, 'r') as f:\n for line in f:\n i += 1\n p.print_progress(i/n)\n\n # We do minimal pre-processing here so the model can learn\n # punctuation\n line = line.lower()\n\n if sentences:\n for sent in sent_tokenize(line):\n tokens = tokenizer(sent)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])\n else:\n tokens = tokenizer(line)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])", "def anlText(self, inputFile):\n strBuf = \"\"\n splitter = re.compile(self._stcSeps)\n for rawLine in inputFile:\n line = rawLine.replace(\"\\n\", \"\")\n if (not splitter.search(line)): # Don't have a full sentence yet\n strBuf += \" \" + line\n else: # Found a sentence end. 
Get and process the full sentence.\n tempText = strBuf + line\n while splitter.search(tempText):\n stcList = splitter.split(tempText, 1)\n self.anlSentence(stcList[0])\n tempText = stcList[1] # Store what's left for the next\n strBuf = tempText\n if len(strBuf): # Process whatever is left at the end.\n self.anlSentence(strBuf)", "def __read_lines__(self):\r\n fd = open(self.input_file, \"r\")\r\n lines = fd.readlines()\r\n fd.close()\r\n return lines", "def read_file(name):\n\twith open(name) as f:\n\t\tmodel=f.readlines()\n\n\ts=[]\n\tsequences=[]\n\ti=0;\n\n\tfor word in model:\n\t\ti+=1\n\t\tword=word[:-1]\n\t\ts.append(word)\n\t\tif i %10 == 0 or i == (len(model)-1):\n\t\t\tsequences.append(s)\n\t\t\ts=[]\n\n\treturn sequences", "def load_retrieved_sentences(retrieved_dp, cid):\n if not exists(retrieved_dp):\n raise ValueError('retrieved_dp does not exist: {}'.format(retrieved_dp))\n\n fp = join(retrieved_dp, cid)\n with io.open(fp, encoding='utf-8') as f:\n content = f.readlines()\n\n original_sents = [ll.rstrip('\\n').split('\\t')[-1] for ll in content]\n\n processed_sents = [dataset_parser._proc_sent(ss, rm_dialog=False, rm_stop=True, stem=True)\n for ss in original_sents]\n\n return [original_sents], [processed_sents] # for compatibility of document organization for similarity calculation", "def abstract2sents(abstract):\n\tcur = 0\n\tsents = []\n\twhile True:\n\t\ttry:\n\t\t\tstart_p = abstract.index(SENTENCE_START, cur)\n\t\t\tend_p = abstract.index(SENTENCE_END, start_p + 1)\n\t\t\tcur = end_p + len(SENTENCE_END)\n\t\t\tsents.append(abstract[start_p + len(SENTENCE_START):end_p].strip())\n\t\texcept ValueError as e: # no more sentences\n\t\t\treturn sents", "def read_corpus_data(corpusfname, context_size):\n # Create word to index dictionary and register sentence delimiter and unknown\n # symbols\n word_to_idx = defaultdict(lambda: len(word_to_idx))\n eos_idx = word_to_idx[EOS_SYMBOL]\n UNK = word_to_idx[UNK_SYMBOL]\n\n start_padding = [eos_idx] * context_size\n sentences = []\n\n with open(corpusfname) as corpus:\n for line in corpus:\n # Add the sentence with start and end of sentence delimiters\n sentences.append(start_padding + [word_to_idx[word] for word in line.strip().split()] + [eos_idx])\n\n # Close the dictionary\n word_to_idx = dict(word_to_idx)\n return sentences, word_to_idx", "def sentences_2_idxs(self):\n fo_pos = open(self.config.parsed_train_file_pos, 'w')\n fo_neg = open(self.config.parsed_train_file_neg, 'w')\n self.load_dicts()\n labels = pd.read_csv(self.config.train_file, usecols=[\"target\"])\n\n labels = list(labels.values[:, 0])\n questions = pd.read_csv(self.config.train_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx.get(self.config.unknown_token)\n\n for label, quest in zip(labels, questions.question_text):\n tokens = preprocess_text(quest)\n\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n out_line = (str(\" \".join(str(num) for num in idxs)) + \"\\n\")\n if label == 1:\n fo_pos.write(out_line)\n else:\n fo_neg.write(out_line)", "def read(self):\n path = os.path.expanduser(self.path)\n with open(path, encoding=\"utf-8\") as f:\n return f.read().splitlines()", "def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r', encoding=\"utf8\") as f:\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n 
self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r', encoding=\"utf8\") as f:\n idss = []\n for line in f:\n words = line.split() + ['<eos>']\n ids = []\n for word in words:\n ids.append(self.dictionary.word2idx[word])\n idss.append(torch.tensor(ids).type(torch.int64))\n ids = torch.cat(idss)\n\n return ids", "def testSentences(self):\n\n textractor = Textractor(sentences=True)\n\n # Extract text as sentences\n sentences = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of sentences is as expected\n self.assertEqual(len(sentences), 17)", "def ie_preprocess(document):\n sentences = nltk.sent_tokenize(document) #NLTK default sentence segmenter\n #print sentences # sentences are segmented\n sentences = [nltk.word_tokenize(sent) for sent in sentences] # NLTK word tokenizer \n #print sentences # sentences are tokenized\n sentences = [nltk.pos_tag(sent) for sent in sentences] # NLTK POS tagger \n #print sentences # sentences are POS tagged\n return sentences", "def load_file_sentences(filepath, filename):\n # Read file as string first\n f = open(filepath, 'r')\n text = f.read()\n f.close()\n # Strip the newlines\n text = filter(lambda x: x != '\\n', text)\n # Now use nltks method to read the sentences\n sentences = sent_tokenize(text)\n # convert everything to lower case\n sentences = map(str.lower, sentences)\n \"\"\"sentences = [(s.lower(), filename) for s in sentences]\"\"\"\n # Create segments by clustering. Let's say 3 segments per text.\n # Similarity metric shall be cosine.\n fs = create_feature_space(sentences)\n vectors = [vectorize(fs, sent) for sent in sentences]\n compute_similarity_matrix(vectors, cosine_similarity, filename+\".similarities\")\n segments = cluster_sentences(filename+\".similarities\", __cluto_bin, 3)\n # Stitch it all together\n return zip(sentences, [filename]*len(sentences), segments)", "def sents(infile):\n with io.open(infile, 'r', encoding='utf8') as fin:\n for line in fin:\n yield line.strip()", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read_txt(path):\n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read_txt(path):\n \n with open(path, \"r\") as f:\n return f.read().splitlines()", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>'] #TODO: Change\n data.append(sent)\n return data", "def load_sentences(args):\n logger = logging.getLogger('logger')\n sentence_list = []\n for file_name in os.listdir(args['data_path']):\n if file_name.endswith('.jsonl'):\n file_path = os.path.join(args['data_path'], file_name)\n logger.info('Loading sentences from file {}.'.format(file_path))\n with open(file_path) as data_file:\n data_lines = data_file.readlines()\n data_json = list(map(lambda x: json.loads(x), data_lines))\n if not args['no_premise']:\n sentence_list += list(map(lambda x: x['sentence1'], data_json))\n if not args['no_hypothesis']:\n sentence_list += list(map(lambda x: x['sentence2'], data_json))\n\n logger.info('Starting to remove duplicates from {} sentences loaded.'.format(len(sentence_list)))\n unique_sent_list = list(set(sentence_list))\n logger.info('Done removing duplicates. 
Loaded {} unique sentences.'.format(len(unique_sent_list)))\n return unique_sent_list", "def get_processed_sentence(self):\n\n if not hasattr(self, \"in_sentence\"):\n raise AttributeError(f\"{self} does not have attribute 'in_sentence'.\")\n\n try:\n sen_ixs = [sen.element_id for sen in self.in_sentence]\n except TypeError as e:\n sen_ixs = [self.in_sentence.element_id]\n\n sen_procs = [\n s\n for i, s in enumerate(self.in_document.sentences_processed)\n if i in sen_ixs\n ]\n return sen_procs", "def sentences(a, b):\n\n # TODO\n return []", "def sentences_from_string(this_class, text):\n # Note that the native method below leaks. We work around this\n # by acquiring its pointer in __init__\n sentReps = parser.sentRepsFromString(text)\n return list(map(this_class, sentReps))", "def get_text_input(path):\n with open(path, 'r', encoding='utf8') as f:\n sent_dict = json.load(f)\n sents = [sent_dict[i] for i in sent_dict]\n tokenized_sents = [[word[0] for word in sent] for sent in sents]\n return tokenized_sents", "def return_augmented_sentences(self) -> list:\n return self.augmented_sentence_list", "def read_hansard(train_dir, num_sentences):\n \"\"\"\n # TODO\n # Get starting files from directory:\n # Get file num\n # If file language is english get french with same num, & vice versa\n # load files into two lists: curr_english, curr_french\n # while count < num_sentences:\n # if index >= len(curr_english):\n # load two new files into curr_english and curr_french\n # make sure to keep track of files already read\n # index = 0 \n # sentences['e'][count] = preprocess(curr_english[index])\n # sentences['f'][count] = preprocess(curr_french[index])\n\n #====================================\n # Return (eng, fre) version:\n # Get starting files from directory:\n # Get file num\n # If file language is english get french with same num, & vice versa\n # load files into two lists: curr_english, curr_french\n # while count < num_sentences:\n # if index >= min(len(curr_english), len(curr_french)):\n # load two new files into curr_english and curr_french\n # make sure to keep track of files already read\n # index = 0\n # preprocess and remove SENTSTART and SENTEND from the sentences\n # eng[count] = eng_sentence.split()\n # fre[count] = fre_sentence.split()\n # return (eng, fre)\n \"\"\"\n\n files_examined = set()\n count = 0\n eng = []\n fre = []\n\n # for subdir, dirs, files in os.walk(train_dir):\n # for file in files:\n\n files = os.listdir(train_dir)\n for file in files:\n\n # First set up and validate the files\n file_name, extension = os.path.splitext(file)\n file_name, file_id = os.path.splitext(file_name)\n\n # Skip if not .e or .f file\n if not (extension == '.f' or extension == '.e'):\n continue\n\n # Skip if already examined this file pair\n if file_id in files_examined:\n continue\n\n # Skip if either language file is not available\n eng_file = file_name + file_id + '.e'\n fre_file = file_name + file_id + '.f'\n if eng_file not in files or fre_file not in files:\n continue\n\n # If it reaches here we know we can process it\n files_examined.add(file_id)\n print( \"Reading \" + str(count+1))\n\n # Finally open files and iterate simultaneously\n eng_path = os.path.join(train_dir, eng_file)\n fre_path = os.path.join(train_dir, fre_file)\n with open(eng_path) as english:\n with open(fre_path) as french:\n for E, F in zip(english, french):\n\n # Stop when limit reached\n if count >= num_sentences:\n return (eng, fre)\n\n # Process and split sentences\n E = preprocess(E.rstrip(), 'e')\n F = 
preprocess(F.rstrip(), 'f')\n\n E_words = E.split()\n F_words = F.split()\n\n eng.append(E_words)\n fre.append(F_words)\n\n count += 1\n\n return (eng, fre)", "def abstract2sents(abstract):\n cur = 0\n sents = []\n while True:\n try:\n start_p = abstract.index(SENTENCE_START, cur)\n end_p = abstract.index(SENTENCE_END, start_p + 1)\n cur = end_p + len(SENTENCE_END)\n sents.append(abstract[start_p+len(SENTENCE_START):end_p])\n except ValueError as e: # no more sentences\n return sents", "def readlines(self):\n return [line for line in self]", "def lemmatise(path, model_spec) -> Dict[str, List[Sentence[Token]]]:\n\n # handle install\n # lets check if we need to install or not\n if check(model_spec) is not True:\n for model in download(model_spec):\n download(model_spec)\n\n # get tagger\n with shutup():\n tagger = get_tagger(model_spec, batch_size=256, device=\"cpu\", model_path=None)\n\n # import iterator and processor\n iterator, processor = getattr(get_imports(get_model(model_spec)), \"get_iterator_and_processor\")(max_tokens=256)\n\n # Get files content\n files = glob.glob(path + '/*.txt')\n content = defaultdict(list)\n for f in files:\n wit = os.path.splitext(os.path.split(f)[-1])[0]\n tok_id_diff = 0\n with open(f, 'r') as doc:\n for tok_id, token in enumerate(tagger.iter_tag_token(\n data=doc.read(),\n iterator=iterator,\n processor=processor,\n empty_token_on_sent_break=True\n )):\n if not content[wit]:\n content[wit].append([])\n # token_dict = {\"form\": t[0], \"id\": \"w_\" + str(tokenId), \"order_id\": str(tokenId)}\n if token is None:\n tok_id_diff -= 1\n content[wit].append([])\n else:\n content[wit][-1].append({\n **token,\n \"id\": f\"w_{tok_id + tok_id_diff}\",\n \"order_id\": str(tok_id + tok_id_diff)\n })\n\n return content", "def loadTweets():\n sentences = []\n tweets = Tweet.objects.all()\n for tweet in tweets:\n sentences.append(tweet.text)\n\n return sentences", "def read_file_unlabeled(filename):\n\n sentences = open(filename).read().strip().split(\"\\n\\n\") #separate tweets\n ret = []\n for sent in sentences:\n lines = sent.split(\"\\n\") #each word in the tweet\n ret.append( (lines) )\n return ret", "def tokenize(self, path):\n assert os.path.exists(path)\n tokens = 0\n maxLen = 0\n # Find code path and create dictionary\n with open(path, 'r') as f:\n for i, line in enumerate(f):\n filename = line.strip()\n code_path = RAW_DATA_PATH + filename\n assert os.path.exists(code_path)\n try:\n with open(code_path, 'r') as code_f:\n code = code_f.read()\n if len(code) > 100000:\n continue\n kwargs = {'vocab':self.vocab}\n words = tokenizer.tokenize_wrapper(code, **kwargs)\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n except:\n pass\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n filename = line.strip()\n code_path = RAW_DATA_PATH + filename\n assert os.path.exists(code_path)\n try:\n with open(code_path, 'r') as code_f:\n code = code_f.read()\n if len(code) > 100000:\n continue\n kwargs = {'vocab':self.vocab}\n words = tokenizer.tokenize_wrapper(code, **kwargs)\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n self.tic_marks.append(len(words))\n except Exception as e:\n #raise e\n pass\n return ids" ]
[ "0.691375", "0.6559297", "0.6529974", "0.647818", "0.64719594", "0.6423339", "0.6421859", "0.640695", "0.63902426", "0.63782775", "0.6366519", "0.630013", "0.62926716", "0.61867374", "0.6146142", "0.60801035", "0.60796994", "0.6057292", "0.60563534", "0.605594", "0.605538", "0.6034282", "0.59877115", "0.59719175", "0.59719175", "0.59065545", "0.58504677", "0.58386064", "0.5837905", "0.58347994", "0.5825598", "0.5824398", "0.5824071", "0.5793758", "0.5792332", "0.5778574", "0.5759494", "0.5756402", "0.5733621", "0.5707824", "0.5676733", "0.567393", "0.56401473", "0.55700284", "0.5541959", "0.5536199", "0.55354506", "0.5532502", "0.5523709", "0.55232036", "0.5521497", "0.5505014", "0.5499491", "0.5498532", "0.5490758", "0.548741", "0.54840255", "0.5474318", "0.54646957", "0.54542816", "0.54531616", "0.54492927", "0.54416627", "0.5416082", "0.54024833", "0.53990203", "0.5397635", "0.5389974", "0.538177", "0.53804404", "0.5376952", "0.5375171", "0.53620744", "0.5360065", "0.53471804", "0.5344347", "0.5325772", "0.5305619", "0.5290244", "0.5289434", "0.5278822", "0.5273696", "0.52722895", "0.52688575", "0.52688575", "0.5267491", "0.5267184", "0.52641696", "0.52354205", "0.5233647", "0.52319497", "0.52305365", "0.5226809", "0.5225291", "0.5222548", "0.5218431", "0.5214645", "0.5211813", "0.5201899", "0.5201262" ]
0.5305037
78
Returns the ``BatchStats`` for a specific batch.
def get_batch_stats(self, batch): return self.batch_stats[batch]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_batch_stats():\n\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n # Copy for better stability.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n\n return mean, variance", "def test_get_batch_statistics_request(self):\n self.trans_details.get_batch_statistics(\n batch_id = 123456,\n )", "def build_batch_stats():\n\n # Copy for better stability.\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n second_moment = variance + tf.square(mean)\n\n return mean, variance, second_moment", "def get_plant_batch_stats(db_path: str) -> int:\n return get_db_count(db_path, 'batches.db', 'batches')", "def print_batch_stats(self):\n\n # current epoch time, numfiles, numbytes, trans secs, status\n print(f\"TRANS_STATS_BATCH: {time.time()} {self.batchvals['transfer_name']} {self.batchvals['numfiles']} {self.filevals['totbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def batch_to_dict(batch: BatchTrial) -> Dict[str, Any]:\n return {\n \"__type\": batch.__class__.__name__,\n \"index\": batch.index,\n \"trial_type\": batch.trial_type,\n \"ttl_seconds\": batch.ttl_seconds,\n \"status\": batch.status,\n \"status_quo\": batch.status_quo,\n \"status_quo_weight_override\": batch._status_quo_weight_override,\n \"time_created\": batch.time_created,\n \"time_completed\": batch.time_completed,\n \"time_staged\": batch.time_staged,\n \"time_run_started\": batch.time_run_started,\n \"abandoned_reason\": batch.abandoned_reason,\n \"run_metadata\": batch.run_metadata,\n \"stop_metadata\": batch.stop_metadata,\n \"generator_run_structs\": batch.generator_run_structs,\n \"runner\": batch.runner,\n \"abandoned_arms_metadata\": batch._abandoned_arms_metadata,\n \"num_arms_created\": batch._num_arms_created,\n \"optimize_for_power\": batch.optimize_for_power,\n \"generation_step_index\": batch._generation_step_index,\n \"properties\": batch._properties,\n }", "def get_batch(self):\n return self.batch", "def _get_batch(batch, ctx):\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def stats_batchwise(data_source, batch_size=1024):\n mean = np.zeros(data_source.dshape, dtype=np.float32)\n mean_xs = np.zeros_like(mean, dtype=np.float32)\n\n for x, _ in iterate_batches(data_source, batch_size, expand=False):\n corr_fact = float(x.shape[0]) / batch_size\n mean += x.mean(axis=0) * corr_fact\n mean_xs += (x ** 2).mean(axis=0) * corr_fact\n\n corr_fact = float(batch_size) / data_source.n_data\n mean *= corr_fact\n mean_xs *= corr_fact\n std = np.sqrt(mean_xs - mean ** 2)\n\n return mean, std", "def get_batch(self, 
batch_id):\n #fmt = lambda x: join(self.path, self.simulation_paths[x])\n fmt = lambda x: self.simulation_paths[x]\n simulation_paths = [fmt(i) for i in self.batch_indices[batch_id]]\n return Batch(simulation_paths, root=self.path)", "def get_batch(self, name):\n batches = self._meta['sets'].get('batches', {})\n if batches.get(name):\n b = name\n elif batches.get(name):\n b = name\n else:\n raise KeyError('No Batch found named {}.'.format(name))\n return qp.Batch(self, b)", "def __call__(self, batch: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, float]]:\n obs, actions, next_obs = get_keys(batch, *self.batch_keys)\n loss = -self.model_likelihood(obs, actions, next_obs).mean()\n return loss, {\"loss(model)\": loss.item()}", "def batch_info():\n return BatchInfo(\"UFG Hackathon\")", "def get_batch(self, batch_size):\n return random.sample(self.buffer, batch_size)", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n #print(\"T is \",T)\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n #print(\"sampled data \", s, \" \",data, end=\" \")\n batch.append((*data, idx))\n\n idx = np.array([i[2] for i in batch])\n #idx in the offline buffer\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n names_batch = np.array([i[1] for i in batch])\n\n return names_batch, idx", "def get_keyword_stats(self, adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def sync_batch_stats(state: TrainState) -> TrainState:\n # Each device has its own version of the running average batch\n # statistics and those are synced before evaluation\n return state.replace(batch_stats=cross_replica_mean(state.batch_stats))", "def evaluate_batch(self, batch: TorchData) -> Dict[str, Any]:\n batch = cast(Tuple[torch.Tensor, torch.Tensor], batch)\n data, labels = batch\n\n output = self.model(data)\n accuracy = accuracy_rate(output, labels)\n return {\"validation_accuracy\": accuracy, \"validation_error\": 1.0 - accuracy}", "def get_loss(self, session, batch):\n\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n if not self.FLAGS.use_raw_graph:\n input_feed[self.context_embedding] = batch.context_embeddings\n # Note: don't supply keep_prob here, so it will default to 1 i.e. 
no dropout\n output_feed = [self.dev_loss]\n [loss] = session.run(output_feed, input_feed)\n\n return loss", "def get_batch(self):\n\t\tbatch = np.arange(len(self.mem_size))\n\t\tbatch = batch[:self.batch_size]\n\n\t\treturn np.array(self.memory)[batch]", "def batch_metadata(batch_name):\r\n sql = \"\"\"\r\n select top 1 batch_id, batch_name, notification_email_recipients\r\n from dbo.Metadata_ETL_Batch\r\n where batch_name = '{}';\r\n \"\"\".format(batch_name)\r\n with pyodbc.connect(ETL_LOAD_A_ODBC_STRING) as conn:\r\n cursor = conn.execute(sql)\r\n field_names = [column[0] for column in cursor.description]\r\n try:\r\n meta = next(dict(zip(field_names, row)) for row in cursor)\r\n except StopIteration:\r\n raise AttributeError(\"batch_name does not exist in\"\r\n \" batch metadata table.\")\r\n return meta", "def get_batch_data_and_metadata(\n self,\n batch_definition: BatchDefinition,\n ) -> Tuple[Any, BatchSpec, BatchMarkers]: # batch_data\n batch_spec: BatchSpec = self.build_batch_spec(batch_definition=batch_definition)\n batch_data, batch_markers = self._execution_engine.get_batch_data_and_markers(\n batch_spec=batch_spec\n )\n self._execution_engine.load_batch_data(batch_definition.id, batch_data)\n return (\n batch_data,\n batch_spec,\n batch_markers,\n )", "def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def get_batch(self, batch_size):\n n, _ = self.contexts.shape\n if self.buffer_s == -1:\n # use all the data\n ind = np.random.choice(range(n), batch_size)\n else:\n # use only buffer (last buffer_s observations)\n ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)\n return self.contexts[ind, :], self.rewards[ind, :]", "def get_batch(self, batch_kwargs, batch_parameters=None) -> None:\n raise NotImplementedError", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n batch.append((*data, idx))\n idx = np.array([i[5] for i in batch])\n #TD errors are only updated for transitions that are replayed\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n s_batch = np.array([i[0] for i in batch])\n a_batch = np.array([i[1] for i in batch])\n r_batch = np.array([i[2] for i in batch])\n d_batch = np.array([i[3] for i in batch])\n new_s_batch = np.array([i[4] for i in batch])\n\n return s_batch, a_batch, r_batch, d_batch, new_s_batch, idx", "def get_stat(self, name: str) -> int:\n return self._mallctl(f\"stats.{name}\")", "def __call__(self, batch):\n obs, is_ratios = dutil.get_keys(batch, SampleBatch.CUR_OBS, self.IS_RATIOS)\n\n values = self.critic(obs).squeeze(-1)\n with torch.no_grad():\n targets = self.sampled_one_step_state_values(batch)\n value_loss = torch.mean(\n is_ratios * torch.nn.MSELoss(reduction=\"none\")(values, targets) / 2\n )\n return value_loss, {\"loss(critic)\": value_loss.item()}", "def run_training_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 1.0\n fetches = [self.loss, self.train_op]\n\n # options = 
tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n \n loss, _ = session.run(fetches, feed_dict=feed_dict)\n # loss, _ = session.run(fetches,\n # feed_dict=feed_dict,\n # options=options,\n # run_metadata=run_metadata)\n \n # fetched_timeline = timeline.Timeline(run_metadata.step_stats)\n # chrome_trace = fetched_timeline.generate_chrome_trace_format()\n # with open('timeline.json', 'w') as f:\n # f.write(chrome_trace)\n \n return loss", "def calculate_batch_metrics(self):\n pass", "def sample_batch(\n self, batch_index: int = 0, batch_size: int = 1, batch_as_list: bool = True\n ) -> Union[List[numpy.ndarray], Dict[str, numpy.ndarray]]:\n loader = self.data_loader(batch_size=batch_size, batch_as_list=batch_as_list)\n\n return loader.get_batch(bath_index=batch_index)", "def set_batch_stats(self, x):\n\n if self.set_stats_f is None:\n self.set_stats_f = theano.function(\n inputs=[self.input],\n updates=[(self.bm, self.m), (self.bv, self.v)]\n )\n\n self.set_stats_f(x.astype(dtype))", "def get_stats(self):\n return self.manager.get_stats(self)", "def get_stats(self):\n return scales.getStats()[self.stats_name]", "def stats(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/stats\" %\n (self.sessionid, self.name))\n return self.server.json_body(resp)", "def get_batch(dataset, batch_size=32):\n data_count = len(dataset)\n x_rand = np.random.randint(0, data_count, size=batch_size)\n y_list = []\n for i in range(0, batch_size):\n y_list.append(dataset[x_rand[i]]['TCLOSE'])\n batch_x = torch.tensor(np.array([x_rand]).T).float()\n batch_y = torch.tensor(np.array([y_list]).T).float()\n return batch_x, batch_y", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))", "def sample(self, num_items: int, beta: float) -> SampleBatchType:\n assert beta >= 0.0\n\n idxes = self._sample_proportional(num_items)\n\n weights = []\n batch_indexes = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self._storage))**(-beta)\n\n for idx in idxes:\n p_sample = self._it_sum[idx] / self._it_sum.sum()\n weight = (p_sample * len(self._storage))**(-beta)\n count = self._storage[idx].count\n weights.extend([weight / max_weight] * count)\n batch_indexes.extend([idx] * count)\n self._num_timesteps_sampled += count\n batch = self._encode_sample(idxes)\n\n # Note: prioritization is not supported in lockstep replay mode.\n if isinstance(batch, SampleBatch):\n assert len(weights) == batch.count\n assert len(batch_indexes) == batch.count\n batch[\"weights\"] = np.array(weights)\n batch[\"batch_indexes\"] = np.array(batch_indexes)\n\n return batch", "def get_batch_runs(self):\n return self.batch_runs", "def getMiniBatch(self, batch_size, beta):\r\n assert beta > 0\r\n N = len(self.buffer)\r\n # function to sample via probability of the transactions\r\n indexes = self._sample_proportional(batch_size)\r\n weights = []\r\n batch_transitions = []\r\n sum = self._it_sum.sum()\r\n prob_min = self._it_min.min() / sum\r\n max_weight = (prob_min * N) ** (-beta) # according to PER paper,\r\n # max weight is used to normalize the weights\r\n for idx in indexes:\r\n prob_sample = self._it_sum[idx] / sum\r\n weight = (prob_sample * N) ** (-beta) # fixes the bias high prob transaction introduce\r\n weights.append(weight)\r\n batch_transitions.append(self.buffer[idx])\r\n weights /= np.ones_like(weights) * max_weight # normalize\r\n return batch_transitions, weights, indexes", "def monitor_batch_statistical_analysis(\n batch_request: 
BatchStatisticalRequestSpec,\n config: Optional[SHConfig] = None,\n sleep_time: int = _DEFAULT_ANALYSIS_SLEEP_TIME,\n) -> BatchStatisticalRequest:\n if sleep_time < _MIN_ANALYSIS_SLEEP_TIME:\n raise ValueError(\n f\"To avoid making too many service requests please set analysis sleep time >={_MIN_ANALYSIS_SLEEP_TIME}\"\n )\n\n batch_client = SentinelHubBatchStatistical(config=config)\n request_status = BatchRequestStatus(batch_client.get_status(batch_request)[\"status\"])\n while request_status in [BatchRequestStatus.CREATED, BatchRequestStatus.ANALYSING]:\n LOGGER.info(\"Batch job has a status %s, sleeping for %d seconds\", request_status.value, sleep_time)\n time.sleep(sleep_time)\n request_status = BatchRequestStatus(batch_client.get_status(batch_request)[\"status\"])\n\n batch_request = batch_client.get_request(batch_request)\n batch_request.raise_for_status(status=[BatchRequestStatus.FAILED, BatchRequestStatus.CANCELED])\n return batch_request", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)", "def score(self, session, X_batch):\n input_feed = {}\n\n input_feed[self.X] = X_batch\n input_feed[self.is_training] = False\n\n output_feed = [self.y_out]\n\n outputs = session.run(output_feed, input_feed)\n outputs = outputs[0] # Run returns the outputfeed as a list. We just want the first element\n\n return np.array(outputs)", "def batch(self):\n return self._batch", "def get_next_batch(self):\n\n metrics = {}\n for struct in self.metrics.values():\n metrics = {**metrics, **struct.get_next_batch()}\n\n return metrics", "def get_stats(self):\n return self.stats", "async def skribbl_get_stats(self) -> int:\r\n return await self.read(self._skribbl_get_stats)", "def _get_batch_data(self, batch):\n try:\n encoders = [ encoder for encoder in self._data_encoder ]\n except:\n encoders = (self._data_encoder,)\n\n try:\n data_batches = [ encoder.transform_batch(rec for _, rec in batch.iterrows())\n for encoder in encoders ]\n except AttributeError:\n data_batches = [\n [ self._get_data(record, encoder) for _, record in batch.iterrows() ]\n for encoder in encoders ]\n\n try:\n batches = [ np.array(encoder.finalize_batch(batch))\n for encoder, batch in zip(encoders, data_batches)]\n except AttributeError:\n batches = [ np.array(batch) for batch in data_batches ]\n\n return batches if len(batches) > 1 else batches[0]", "def update(self, batch):\n assert self.is_training\n # process batch\n if self.warm_started:\n results = self.update_batch_multi_env(batch)\n else:\n results = self.update_batch_erm(batch)\n # log results\n self.update_log(results)\n return self.sanitize_dict(results)", "def __call__(self, batch: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, float]]:\n obs, actions, next_obs = get_keys(batch, *self.batch_keys)\n logps = self.model_likelihoods(obs, actions, next_obs)\n loss = -torch.stack(logps)\n info = {f\"loss(models[{i}])\": -l.item() for i, l in enumerate(logps)}\n return loss, info", "def batch_job_status_details(batch_name):\r\n sql = \"\"\"\r\n select\r\n job_id, job_name, job_status, job_status_description,\r\n start_time, end_time\r\n from 
dbo.Metadata_ETL_Job_Status_vw\r\n where batch_name = '{}';\r\n \"\"\".format(batch_name)\r\n with pyodbc.connect(ETL_LOAD_A_ODBC_STRING) as conn:\r\n cursor = conn.execute(sql)\r\n field_names = [column[0] for column in cursor.description]\r\n for row in cursor:\r\n yield dict(zip(field_names, row))", "def processBatchFunc(inputBatch): #this is redefined automatically, no need to do any special code here\n with tf.Session(graph=graph, config=config) as sess:\n sess.run(init)\n \n (outValue, ) = sess.run([outputs], feed_dict={inputs: inputBatch})\n \n #print \"count now is: %d\" % count\n\n return outValue", "def get_shipments_by_batch(auth, batch_id, base_url='https://api.cratejoy.com/v1/'):\n \n shipment_endpoint = '{}shipments/?batch_id={}'.format(base_url, batch_id)\n\n resp = requests.get(\n shipment_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(shipment_endpoint,\n resp.status_code))\n print(resp.content)", "def __getitem__(self, index):\n # get the indexs of each batch\n batch_indexs = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n # using batch_indexs to get path of current batch\n batch_path = [self.X_path[k] for k in batch_indexs]\n # get batch data\n batch_x, batch_y = self.data_generation(batch_path)\n return batch_x, batch_y", "def stats(self, **kwargs):\n return stats.stats(self._host, self._session, **kwargs)", "def batch_stat(x):\n\tmean = torch.mean(x, dim=[0, 2, 3], keepdim=True)\n\tvar = torch.mean((x-mean)**2, dim=[0, 2, 3], keepdim=True)\n\treturn mean, var", "def reduce_stats(ds, statistic, batch_size=1, **kwargs):\n logger.info(\n \"Iterating over whole dataset to compute statistic '%s' with batch size %d%s\",\n statistic,\n batch_size,\n \" using kwargs:\\n {}\".format(_dict_to_logstring(kwargs)) if kwargs else '')\n\n batch_size = tf.constant(batch_size, tf.int64)\n if statistic == \"num_elements\":\n tmp_ds = ds.batch(batch_size, drop_remainder=True)\n num_batches = tmp_ds.reduce(tf.constant(0, tf.int64), lambda c, x: c + 1)\n num_batched = batch_size * num_batches\n num_remainder = ds.skip(num_batched).reduce(tf.zeros([], tf.int64), lambda c, x: c + 1)\n logger.info(\n \"Num batches %d, batch_size %d, num batched elements %d, num remainder %d, total num elements: %d.\",\n num_batches.numpy(), batch_size.numpy(), num_batched.numpy(), num_remainder.numpy(), (num_batched + num_remainder).numpy())\n\n elif statistic == \"vad_ratio\":\n # Peek VAD frame length from first element\n vad_frame_length_ms = list(ds.take(1).as_numpy_iterator())[0][\"vad_frame_length_ms\"]\n num, num_speech, num_not_speech, speech_ratio = tf_utils.compute_vad_decision_stats(ds, batch_size)\n logger.info(\n (\"VAD frame statistics:\\n \"\n \"num signals %15d\\n \"\n \"vad frame len %15d ms\\n \"\n \"kept %15d\\n \"\n \"dropped %15d\\n \"\n \"total %15d\\n \"\n \"kept ratio %15.3f\"),\n num.numpy(),\n vad_frame_length_ms,\n num_speech.numpy(),\n num_not_speech.numpy(),\n (num_speech + num_not_speech).numpy(),\n speech_ratio.numpy())\n\n elif statistic == \"size_counts\":\n key = kwargs[\"key\"]\n ndims = kwargs[\"ndims\"]\n logger.info(\"Computing frequencies of sizes in all dimensions for key '%s' of every element of ds. 
Assuming ndims %d.\", key, ndims)\n size_counts_by_axis = tf_utils.count_dim_sizes(ds.batch(batch_size), key, ndims)\n with io.StringIO() as sstream:\n for axis, size_counts in enumerate(size_counts_by_axis):\n print(\"\\n axis/dim {:d}:\\n [freq dim-size]\".format(axis), file=sstream)\n for count in size_counts.numpy():\n print(\" {}\".format(count), file=sstream)\n logger.info(\"%s\", sstream.getvalue())\n\n elif statistic == \"num_non_finite\":\n key = kwargs[\"key\"]\n axis = kwargs.get(\"axis\")\n logger.info(\n \"Computing frequency of non-finite tensors for key '%s' over axis %s in batches of %d.\",\n key, str(axis), batch_size.numpy())\n count_non_finite = lambda t, axis=axis: tf.cast(not tf.math.reduce_any(tf.math.is_finite(t), axis=axis), tf.int64)\n accumulate_batches = lambda c, x: (c[0] + 1, c[1] + count_non_finite(x[key]))\n num_batches, num_non_finite = (ds\n .batch(batch_size, drop_remainder=True)\n .reduce((tf.zeros([], tf.int64), tf.zeros([batch_size], tf.int64)), accumulate_batches))\n num_batched = batch_size * num_batches\n num_remainder, num_non_finite_remainder = (ds\n .skip(num_batched)\n .batch(1)\n .reduce((tf.zeros([], tf.int64), tf.zeros([], tf.int64)), accumulate_batches))\n num_non_finite = tf.math.reduce_sum(num_non_finite) + num_non_finite_remainder\n logger.debug(\n \"\\n num_batches %d\\n batch_size %d\\n num_batched %d\\n num_remainder %d\\n total num elements %d\\n num_non_finite %d\\n num_non_finite_remainder %d\",\n *[t.numpy() for t in (num_batches, batch_size, num_batched, num_remainder, num_batched + num_remainder, num_non_finite, num_non_finite_remainder)])\n logger.info(\n \"Dataset has %d tensors in total under key '%s' of which %d tensors have one or more non-finite values (NaN or inf).\",\n (num_batched + num_remainder).numpy(), key, num_non_finite.numpy())\n\n elif statistic == \"min_max_mean\":\n key = kwargs[\"key\"]\n logger.info(\n \"Computing minimum, maximum and mean scalar values over all tensors under key '%s' in batches of %d.\",\n key, batch_size.numpy())\n min, max, num, sum = tf_utils.reduce_min_max_num_sum(ds, key, batch_size)\n mean = tf.math.divide_no_nan(sum, tf.cast(num, tf.float64))\n logger.info(\n \"\\n key '%s'\\n total num scalars %s\\n min %.6f\\n max %.6f\\n mean %.6f\\n sum %.6f\",\n key, format(num.numpy(), \",\"), min.numpy(), max.numpy(), mean.numpy(), sum.numpy())\n\n else:\n logger.error(\"Unknown statistic type '%s', cannot compute stats for dataset.\", statistic)\n\n return ds", "def monitor_batch_statistical_job(\n batch_request: BatchStatisticalRequestSpec,\n config: Optional[SHConfig] = None,\n sleep_time: int = _DEFAULT_STAT_SLEEP_TIME,\n analysis_sleep_time: int = _DEFAULT_ANALYSIS_SLEEP_TIME,\n) -> JsonDict:\n if sleep_time < _MIN_STAT_SLEEP_TIME:\n raise ValueError(f\"To avoid making too many service requests please set sleep_time>={_MIN_STAT_SLEEP_TIME}\")\n\n batch_request = monitor_batch_statistical_analysis(batch_request, config, sleep_time=analysis_sleep_time)\n if batch_request.status is BatchRequestStatus.PROCESSING:\n LOGGER.info(\"Batch job is running\")\n\n batch_client = SentinelHubBatchStatistical(config=config)\n\n request_status = batch_client.get_status(batch_request)\n progress = request_status[\"completionPercentage\"]\n with tqdm(total=100, initial=progress, desc=\"Completion percentage\") as progress_bar:\n while progress < 100:\n time.sleep(sleep_time)\n\n request_status = batch_client.get_status(batch_request)\n new_progress = request_status[\"completionPercentage\"]\n 
progress_bar.update(new_progress - progress)\n progress = new_progress\n return request_status", "def get_stats_by_adaccount(self, account_id, batch=False, start_time=None, end_time=None):\n args = {}\n start_time = start_time or 0\n path = 'act_{0}/stats/{1}'.format(account_id, self.__parse_time(start_time))\n if end_time:\n path = path + '/{0}'.format(self.__parse_time(end_time))\n return iterate_by_page(self.make_request(path, 'GET', args, batch=batch))", "def get_stats(self, loadbalancer=None):\n uri = \"/loadbalancers/%s/stats\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n return body", "def __getitem__(self, idx):\n return self.batches[idx]", "def batch_idx(self):\n if self._batch_idx >= self.batch_size:\n self.clear_batch()\n \n return self._batch_idx", "def GetBatchJob(client, batch_job_id):\n batch_job_service = client.GetService('BatchJobService')\n\n selector = {\n 'fields': ['Id', 'Status', 'DownloadUrl'],\n 'predicates': [\n {\n 'field': 'Id',\n 'operator': 'EQUALS',\n 'values': [batch_job_id]\n }\n ]\n }\n\n return batch_job_service.get(selector)['entries'][0]", "def current_stat(self, stat: Stat) -> int:\n return self.stats[stat]", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response", "def get_statistics(session, model, x_data, y_data, batch_size):\n\n losses = []\n\n labels = []\n predicted_labels = []\n\n batches_generator = utilities.get_batches_generator(x_data, y_data, batch_size)\n data_size = x_data.shape[0]\n\n for _ in range(data_size // batch_size):\n\n x_batch, y_batch = next(batches_generator)\n\n feed_dictionary = {\n model.x_placeholder: x_batch,\n model.y_placeholder: y_batch\n }\n\n batch_loss, batch_prediction = session.run([model.loss_op, model.prediction], feed_dictionary)\n\n losses.append(batch_loss)\n\n labels_batch = np.argmax(y_batch, axis=1).flatten()\n labels.extend(labels_batch)\n\n predicted_batch_labels = np.argmax(batch_prediction, axis=1).flatten()\n predicted_labels.extend(predicted_batch_labels)\n\n loss = np.mean(losses)\n accuracy = np.mean((np.array(labels) == np.array(predicted_labels)).astype(np.float32))\n\n return loss, accuracy", "def GetStats(self):\r\n\t\tArg1 = self.href\r\n\t\treturn self._execute('GetStats', payload=locals(), response_object=None)", "def getStats(self):\n\n raise NotImplementedError", "def get_async_job_result(self, account_id, job_id, batch=False):\n path = 'act_%s/reportstats' % account_id\n args = {\n 'report_run_id': job_id\n }\n return self.make_request(path, 'GET', args=args, batch=batch)", "def get_bulk_send_batch_status_with_http_info(self, account_id, bulk_send_batch_id, **kwargs):\n\n all_params = ['account_id', 'bulk_send_batch_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_bulk_send_batch_status\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'account_id' is set\n if ('account_id' not in params) or (params['account_id'] is None):\n raise ValueError(\"Missing the required parameter `account_id` when calling `get_bulk_send_batch_status`\")\n # verify the required parameter 
'bulk_send_batch_id' is set\n if ('bulk_send_batch_id' not in params) or (params['bulk_send_batch_id'] is None):\n raise ValueError(\"Missing the required parameter `bulk_send_batch_id` when calling `get_bulk_send_batch_status`\")\n\n\n collection_formats = {}\n\n resource_path = '/v2.1/accounts/{accountId}/bulk_send_batch/{bulkSendBatchId}'.replace('{format}', 'json')\n path_params = {}\n if 'account_id' in params:\n path_params['accountId'] = params['account_id']\n if 'bulk_send_batch_id' in params:\n path_params['bulkSendBatchId'] = params['bulk_send_batch_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='BulkSendBatchStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def get_batch_with_weights(self, batch_size):\n n, _ = self.contexts.shape\n if self.buffer_s == -1:\n # use all the data\n ind = np.random.choice(range(n), batch_size)\n else:\n # use only buffer (last buffer_s obs)\n ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)\n\n weights = np.zeros((batch_size, self.num_actions))\n sampled_actions = np.array(self.actions)[ind]\n a_ind = np.array([(i, val) for i, val in enumerate(sampled_actions)])\n weights[a_ind[:, 0], a_ind[:, 1]] = 1.0\n return self.contexts[ind, :], self.rewards[ind, :], weights", "def sample_batch(self, batch_B, batch_T=None):\n batch_T = self.batch_T if batch_T is None else batch_T\n T_idxs, B_idxs = self.sample_idxs(batch_B, batch_T)\n return self.extract_batch(T_idxs, B_idxs, batch_T)", "def batch_percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"batch_percentage\")", "def generate_statistics_in_memory(\n record_batch: pa.RecordBatch,\n options: stats_options.StatsOptions = stats_options.StatsOptions()\n) -> statistics_pb2.DatasetFeatureStatisticsList:\n stats_generators = cast(List[stats_generator.CombinerStatsGenerator],\n get_generators(options, in_memory=True))\n partial_stats = generate_partial_statistics_in_memory(record_batch, options,\n stats_generators)\n return extract_statistics_output(partial_stats, stats_generators)", "def get_job_stats(self, job_name):\n return self.manager.get_job_stats(job_name)", "def _get_batch(self, batch_size=1):\n\t\t# Build samples from a normal distribution with zero mean\n\t\t# and variance of one.\n\t\trandom = torch.randn(batch_size)\n\t\tx = self._make_features(random)\n\t\treturn Variable(x)", "def stats(self):\n return self._stats", "def report_func(epoch, batch, num_batches, start_time, report_stats,\n report_every):\n if batch % report_every == -1 % report_every:\n report_stats.output(epoch, batch + 1, num_batches, start_time)\n report_stats = utils.Statistics()\n\n return report_stats", "def run_testing_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 0.0\n fetches = [self.loss, self.predictions]\n loss, probabilities = session.run(fetches, 
feed_dict=feed_dict)\n return loss, probabilities", "def sample(self, batch_size, beta):\n assert beta > 0\n\n idxes = self._sample_proportional(batch_size)\n\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self._storage)) ** (-beta)\n\n for idx in idxes:\n p_sample = self._it_sum[idx] / self._it_sum.sum()\n weight = (p_sample * len(self._storage)) ** (-beta)\n weights.append(weight / max_weight)\n weights = np.array(weights)\n encoded_sample = self._encode_sample(idxes)\n return tuple(list(encoded_sample) + [weights, idxes])", "def _get_batch_tiles_per_status(\n batch_request: BatchRequest, batch_client: SentinelHubBatch\n) -> DefaultDict[BatchTileStatus, List[dict]]:\n tiles_per_status = defaultdict(list)\n\n for tile in batch_client.iter_tiles(batch_request):\n status = BatchTileStatus(tile[\"status\"])\n tiles_per_status[status].append(tile)\n\n return tiles_per_status", "def _min_sampled_from_batch(self):\n return min([col._last_batch_size for col in self._profile], default=0)", "def nextBatch(self, batch_to_get_id=None):\n\n # batch id to get\n if batch_to_get_id is None:\n batch_to_get_id = self.current_batch_id\n\n # batch to get\n batch_to_get = self.batches[batch_to_get_id]\n\n # check if batch is available in memory / disk\n if batch_to_get.is_stored:\n # get batch data\n X_data, y_data = batch_to_get.getBatchData()\n # return X np array, label array\n return X_data, y_data\n\n # get data of current batch\n urls = list()\n\n for key in batch_to_get.ids:\n value = self.data_dict.data_dict[key]\n batch_to_get.batch_subjects[key] = value\n batch_to_get.y_data.append(value['label'])\n urls.append(value['path'])\n\n # get images using Image Loader class\n binary_images = self.imageLoader.getImages(urls)\n\n # convert images to array\n X_data = self._listOfImagesToNumpy(images=binary_images)\n y_data = np.array(batch_to_get.y_data)\n\n # decide where to store batch\n system_memory_usage_percent = psutil.virtual_memory()[2]\n if (system_memory_usage_percent < 90):\n save_to = \"memory\"\n elif self.disk_scratch is not None:\n save_to = \"disk\"\n elif self.disk_scratch is not None:\n save_to = \"disk_raw\"\n else:\n save_to = \"none\"\n\n # store batch\n batch_to_get.storeBatch(storage=save_to, X_data=X_data,\n y_data=y_data)\n\n # increment current batch\n if self.current_batch_id < (self.n_batches-1):\n self.current_batch_id += 1\n else:\n self.current_batch_id = 0\n\n # return X np array, label array\n return X_data, y_data", "def evaluate_batch(self, batch, stage):\n if stage != sb.Stage.TEST:\n # Same as before\n out = self.compute_forward(batch, stage=stage)\n loss = self.compute_objectives(out, batch, stage=stage)\n out_prob = self.compute_forward(batch, stage=stage)\n out_prob = out_prob.squeeze(1)\n score, index = torch.max(out_prob, dim=-1)\n cm_scores = [out_prob[i].item() for i in range(out_prob.shape[0])]\n self.pd_out['files'] += batch.id\n self.pd_out['scores'] += cm_scores\n return loss.detach().cpu()\n else:\n out_prob = self.compute_forward(batch, stage=stage)\n out_prob = out_prob.squeeze(1)\n score, index = torch.max(out_prob, dim=-1)\n # text_lab = self.hparams.label_encoder.decode_torch(index)\n return out_prob, score, index\n # return out_prob, score, index, text_lab", "def get_stats(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/stats\"\n\n _response = self.connector.http_call(\"get\", _url)\n\n # Update object\n self.stats = _response.json()", "def _compute_loss(self, batch: Dict[str, 
torch.Tensor]) -> torch.Tensor:\n\n feat_static_cat = batch[\"feat_static_cat\"]\n feat_static_real = batch[\"feat_static_real\"]\n past_time_feat = batch[\"past_time_feat\"]\n past_target = batch[\"past_target\"]\n future_time_feat = batch[\"future_time_feat\"]\n future_target = batch[\"future_target\"]\n past_observed_values = batch[\"past_observed_values\"]\n\n picnn = self.model.picnn\n\n _, scale, hidden_state, _, _ = self.model.unroll_lagged_rnn(\n feat_static_cat,\n feat_static_real,\n past_time_feat,\n past_target,\n past_observed_values,\n future_time_feat,\n future_target,\n )\n\n hidden_state = hidden_state[:, : self.model.context_length]\n\n distr = self.model.output_distribution(picnn, hidden_state, scale)\n\n context_target = past_target[:, -self.model.context_length + 1 :]\n target = torch.cat(\n (context_target, future_target),\n dim=1,\n )\n\n loss_values = self.loss(distr, target)\n\n return loss_values.mean()", "def get_stats(self, dim, prefix=''):\n def add_stat(stats, key, val):\n stats[prefix+key] = val\n def get_stat(stats, key):\n return stats[prefix+key]\n #print dim\n #print self.num_cells\n #print self.data\n p = self.get_points(dim)\n s = OrderedDict()\n add_stat(s, 'num_cells', self.num_cells)\n add_stat(s, 'min', np.min(p))\n add_stat(s, 'max', np.max(p))\n add_stat(s, 'average', np.average(p))\n add_stat(s, 'std', np.std(p))\n add_stat(s, 'median', np.median(p))\n add_stat(s, 'gaussian_fit', \n self.gaussian_pdf_compare(\n dim, 100,\n get_stat(s, 'average'),\n get_stat(s, 'std')))\n\n keys = s.keys()\n vals = np.array([s.values()])\n ret = DataTable(vals, keys, name=self.sub_name('stats for %s' % dim))\n ret.properties['original_table'] = self\n return ret", "def process_state_batch(self, batch):\n return batch", "def process_state_batch(self, batch):\n return batch", "def get_run_stats(self):\n return self.run_stats", "def train_batch(self,X_batch,Y_batch):\n\n average_loss = 0\n for x, y in zip(X_batch, Y_batch):\n datum_loss = self.train_datum(x,y)\n average_loss += datum_loss / self.batch_size\n\n # Update weights on all layers after processing the batch\n for l in self.layers:\n l.update_weights()\n\n return average_loss", "def get_stats(self):\n if self.character_data is None: raise Exception('You must call get_character() first.')\n character = self.character_data\n if self._stats is not None:\n return self._stats\n\n try:\n prof_bonus = int(character.value(\"H14\"))\n except (TypeError, ValueError):\n raise MissingAttribute(\"Proficiency Bonus\")\n\n index = 15\n stat_dict = {}\n for stat in ('strength', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma'):\n try:\n stat_dict[stat] = int(character.value(\"C\" + str(index)))\n index += 5\n except (TypeError, ValueError):\n raise MissingAttribute(stat)\n\n stats = BaseStats(prof_bonus, **stat_dict)\n self._stats = stats\n return stats", "def get_conversion_stats(self, adgroup_id, batch=False):\n path = '%s/conversions' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def _build_statistics_variance(self, input_batch,\n reduction_indices, use_batch_stats):\n # Set up our moving statistics. 
When connecting in parallel, this is shared.\n self._moving_mean = tf.get_variable(\n \"moving_mean\",\n shape=self._mean_shape,\n collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.GLOBAL_VARIABLES],\n initializer=tf.zeros_initializer(),\n trainable=False)\n\n self._moving_variance = tf.get_variable(\n \"moving_variance\",\n shape=self._mean_shape,\n collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.GLOBAL_VARIABLES],\n initializer=tf.ones_initializer(),\n trainable=False)\n\n def build_batch_stats():\n \"\"\"Builds the batch statistics calculation ops.\"\"\"\n\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n # Copy for better stability.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n\n return mean, variance\n\n def build_moving_stats():\n return (\n tf.identity(self._moving_mean),\n tf.identity(self._moving_variance),\n )\n\n mean, variance = utils.smart_cond(\n use_batch_stats,\n build_batch_stats,\n build_moving_stats,\n )\n\n return mean, variance", "def _get_from_datastore( uuid ):\n return db.Query(Stats).filter('uuid =', uuid).get()" ]
[ "0.65609884", "0.65114206", "0.65014577", "0.6211535", "0.59668744", "0.5954246", "0.58340657", "0.5821755", "0.57448745", "0.57334435", "0.57184416", "0.5670613", "0.56242794", "0.560072", "0.55778915", "0.5565209", "0.55386823", "0.5498818", "0.54651314", "0.5450578", "0.54305553", "0.54261255", "0.54030293", "0.5400469", "0.53819317", "0.53796047", "0.53737915", "0.5369569", "0.5344904", "0.5344596", "0.53435737", "0.53434104", "0.531763", "0.53123295", "0.53054625", "0.52875733", "0.52668315", "0.5263415", "0.52572644", "0.52546406", "0.5251265", "0.52494735", "0.5245417", "0.5245417", "0.5245417", "0.5245417", "0.52382624", "0.52364165", "0.5224058", "0.5218461", "0.5216408", "0.5211477", "0.52088225", "0.51990825", "0.51934975", "0.51841277", "0.51568544", "0.5155182", "0.51499", "0.51452255", "0.51436", "0.5140425", "0.51386046", "0.51325625", "0.5127544", "0.5123182", "0.5112117", "0.51119214", "0.5111914", "0.510565", "0.51045394", "0.5098643", "0.50946516", "0.509156", "0.50910246", "0.508644", "0.50739026", "0.50565153", "0.50511104", "0.5045082", "0.50447375", "0.50278085", "0.50237495", "0.5021762", "0.5016926", "0.5005602", "0.49991605", "0.49939558", "0.4991128", "0.4984678", "0.49758872", "0.4975125", "0.49671465", "0.49671465", "0.49545583", "0.4936158", "0.4929104", "0.49288332", "0.49237537", "0.49120685" ]
0.89985496
0
Returns the number of batches in the current document inventory.
def num_batches(self): return len(self.batch_stats)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def count_records(batches: List[Batch]) -> int:\n return sum(b.current_size for b in batches)", "def get_num_batches(self, instances: Iterable[Instance]) -> int:\n n_docs = len(set([instance[\"metadata\"][\"doc_key\"] for instance in instances]))\n return n_docs", "def num_sown_batches(self):\n self.calc_progress()\n return self._num_sown_batches", "def get_num_items(self):\r\n return self.num_items", "def batch_size(self):\n return self.size", "def item_count(self):\n return self.items.shape[0]", "def get_num_batches(self,batch_size):\r\n \r\n return len(self) // batch_size", "def number_of_batches(self):\n return int(np.floor(len(self.file_paths_list) / self.batch_size))", "def size(self):\n return self.num_item", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def items_num(self):\n return len(self.items)", "def total_train_batches(self) -> int:\n return self.trainer.num_training_batches", "def items_num(self):\n\t\treturn len(self.items)", "def items_num(self):\n\t\treturn len(self.items)", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)", "def batch_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_node_count\")", "def batch_size(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"batch_size\")", "def batch_request_size(self):\n return self._batch_request_size", "def quantity(self) -> int:\n return self._quantity", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def batch_size(self) -> int:\n ...", "def get_per_slot_batch_size(self) -> int:\n return self._per_slot_batch_size", "def get_plant_batch_stats(db_path: str) -> int:\n return get_db_count(db_path, 'batches.db', 'batches')", "def qty_increments(self):\n return self._qty_increments", "def nb_cart_items(self):\n return CartItem.objects.filter(cart=self).count()", "def document_count(self):\n return self.client.scard(self.dbprefix + 'docs')", "def batch_size(self) -> ConfigNodePropertyInteger:\n return self._batch_size", "def max_num_batches(self):\n return self._max_num_batches", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def quantity(self) -> int:\n if self._cached_items is not None:\n return sum([item.quantity for item in self._cached_items])\n aggr = self.items.aggregate(quantity=models.Sum('quantity'))\n return aggr['quantity'] or 0", "def get_inventory_count(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def qty(self):\n return self._qty", "def qty(self):\n return 
self._qty", "def qty(self):\n return self._qty", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def __len__(self) -> int:\n return len(self.reps_batches)", "def __len__(self):\n return self.limit_batches", "def __len__(self) -> int:\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def countitems(self):\n count = 0\n sid = self.client.scannerOpen(self.table, '', ['f:s'])\n while 1:\n r = self.client.scannerGetList(sid, 1000)\n #r = self.client.scannerGet(sid)\n if not r: break\n count += len(r)\n logging.debug('%d %s', count, r[-1].row)\n self.scannerClose(sid)\n return count", "def __len__(self):\n return int(np.floor(len(self.list_ids) / self.batch_size))", "def size(self) -> int:\n return self.num_items", "def get_total_cells(self):\n return self._get(\"cells\")", "def get_num_chunks(self) -> int:", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))", "def record_batch_size(self):\n return 10000", "def uses(self):\n recipe_count = Quantity.query.filter_by(id_ingredient=self.id).count()\n subrecipe_count = Subquantity.query.filter_by(id_ingredient=self.id).count()\n return recipe_count + subrecipe_count", "def IterationCount(self):\r\n\t\treturn self._get_attribute('iterationCount')", "def batch_idx(self):\n if self._batch_idx >= self.batch_size:\n self.clear_batch()\n \n return self._batch_idx", "def __len__(self):\n return len(self.batches)", "def count(self):\n return self.get_count()", "def items_count(self):\n return len(self.items)", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def get_nb_results(self):\n return self.nb_results", "def doc_count(self):\n\t\treturn self.index.collection.count()", "def get_num_records(self):\n return self.__num_records", "def get_batch_size():\n return get_global_variable(GraphKeys.BATCH_SIZE)", "def __len__(self):\n return int(np.floor(len(self.ids) / self.batch_size))", "def document_count(self):\n return self._json['coredata'].get('document-count', '0')", "def get_item_count(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def size(self):\n return self._counter", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def num_of_pages(self) -> int:\n try:\n return int(round(self.__number_of_items / 48))\n except TypeError:\n raise TypeError(\"number_of_items must be of int type\")", "def GetNumberOfResultsProcessed(self) -> int:\n return self.i", "def ndocuments(self):\n return self._ndocuments", "def listing_count(self) -> int:\n return pulumi.get(self, \"listing_count\")", "def acq_batch_size(self):\n return self.batch_size * self.batches_per_acquisition", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def __len__(self):\n return int(np.ceil(self.max_index / float(self.batch_size)))", "def quantity(self):\n return 
self._quantity", "def count(self):\n return self.size()", "def __len__(self):\r\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def _get_batch_size(self):\n if self.batch_size == 'auto':\n return self._backend.compute_batch_size()\n else:\n # Fixed batch size strategy\n return self.batch_size", "def get_numpins(self):\n return self.numpins", "def get_count(self):\r\n return self.count", "def Count_Documents(db):\r\n \r\n count = db.Transaction.estimated_document_count()\r\n print(\"Number of documents in the database Transaction: \" + str(count) + \".\\n\")\r\n return count", "def numberConsumed(self):\n\n\t\treturn len([bottle for bottle in self.bottles if bottle.consumption != None])", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def getNoOfRows(self):\n return _patchExtractor.patchExtractor_getNoOfRows(self)", "def get_num_objects(cls):\n return cls.mum_objects", "def size(self):\n\t\treturn self._count", "def get_number_of_items(self):\n return len(self.__item_map)", "def training_set_count(self) -> int:\n return pulumi.get(self, \"training_set_count\")", "def get_count(self):\n\n\t\treturn self.__count" ]
[ "0.687607", "0.6610767", "0.657245", "0.64142066", "0.63633513", "0.6283254", "0.6276345", "0.6246339", "0.62053406", "0.6200216", "0.61746264", "0.61746264", "0.61746264", "0.61746264", "0.6167673", "0.61566156", "0.6134254", "0.6134254", "0.61117995", "0.6078967", "0.6069716", "0.6061918", "0.6058023", "0.60565996", "0.6046527", "0.6046527", "0.6046527", "0.6046527", "0.60316724", "0.6018395", "0.6017493", "0.60138714", "0.60045695", "0.5988765", "0.5973644", "0.59628713", "0.59599847", "0.5953714", "0.5953714", "0.5953453", "0.5935115", "0.59116936", "0.59116936", "0.59116936", "0.58814096", "0.58814096", "0.58814096", "0.5874305", "0.58713", "0.58631563", "0.5854046", "0.58497757", "0.5843118", "0.5842792", "0.58397883", "0.58382845", "0.58322406", "0.58281237", "0.5818018", "0.58107966", "0.57949764", "0.5776777", "0.5746014", "0.57459545", "0.57451147", "0.5741603", "0.5741603", "0.5741603", "0.5739235", "0.5729002", "0.57236594", "0.5716676", "0.57156193", "0.57128274", "0.5710463", "0.5702942", "0.56997675", "0.5699009", "0.5698829", "0.5694965", "0.5693417", "0.5691165", "0.5682989", "0.5682989", "0.56785977", "0.56775784", "0.5676838", "0.5673745", "0.5671366", "0.5670093", "0.56658983", "0.5654028", "0.5650732", "0.5650251", "0.56434363", "0.563755", "0.5637163", "0.56317306", "0.5631309", "0.5626382" ]
0.65176564
3
Convenience method that sums up all the sentences across all batches.
def get_total_sentences(self): # loop through batches and add up all their individual sentence counts total_sentences = 0 for batch in self.batch_stats: total_sentences += self.batch_stats[batch].total_sentences return total_sentences
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))", "def summarize(self, doc):\n import torch\n\n with torch.no_grad():\n answers_input_ids = self.tokenizer.batch_encode_plus(\n [doc], return_tensors=\"pt\", truncation=True, max_length=1024\n )[\"input_ids\"].to(self.torch_device)\n summary_ids = self.model.generate(\n answers_input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=142,\n min_length=56,\n no_repeat_ngram_size=3,\n )\n\n exec_sum = self.tokenizer.decode(\n summary_ids.squeeze(), skip_special_tokens=True\n )\n return exec_sum", "def get_batches(summaries, texts, batch_size):\r\n for batch_i in range(0, len(texts)//batch_size):\r\n start_i = batch_i * batch_size\r\n summaries_batch = summaries[start_i:start_i + batch_size]\r\n texts_batch = texts[start_i:start_i + batch_size]\r\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\r\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\r\n \r\n # Need the lengths for the _lengths parameters\r\n pad_summaries_lengths = []\r\n for summary in pad_summaries_batch:\r\n pad_summaries_lengths.append(len(summary))\r\n \r\n pad_texts_lengths = []\r\n for text in pad_texts_batch:\r\n pad_texts_lengths.append(len(text))\r\n \r\n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths", "def get_batches(summaries, texts, batch_size):\n for batch_i in range(0, len(texts)//batch_size):\n start_i = batch_i * batch_size\n summaries_batch = summaries[start_i:start_i + batch_size]\n texts_batch = texts[start_i:start_i + batch_size]\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\n \n # Need the lengths for the _lengths parameters\n pad_summaries_lengths = []\n for summary in pad_summaries_batch:\n pad_summaries_lengths.append(len(summary))\n \n pad_texts_lengths = []\n for text in pad_texts_batch:\n pad_texts_lengths.append(len(text))\n \n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths", "def run_summarized_text(text,lines):\r\n \r\n #text_preprocessing\r\n words = word_tokenize(text)\r\n # print(words)\r\n print(\"\\n\")\r\n ps = PorterStemmer()\r\n lem = WordNetLemmatizer()\r\n stopWords = set(stopwords.words(\"english\"))\r\n # print(stopWords)\r\n print(\"\\n\")\r\n # 1 Create the word frequency table\r\n freq_table = calc_weighted_frequency(words,ps,lem,stopWords,text)\r\n\r\n '''\r\n We already have a sentence tokenizer, so we just need \r\n to run the sent_tokenize() method to create the array of sentences.\r\n '''\r\n\r\n # 2 Tokenize the sentences\r\n sentences = sent_tokenize(text)\r\n print(sentences)\r\n print(\"\\n\")\r\n\r\n # 3 Important Algorithm: score the sentences\r\n sentence_scores = get_sentence_score(sentences, freq_table)\r\n\r\n #\r\n\r\n # 4 Important Algorithm: Generate the summary\r\n summary = generate_summary(sentence_scores,lines)\r\n\r\n return summary", "def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n 
np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size", "def __call__(self, docs_batch: List[str]) -> Tuple[List[List[str]], List[List[int]]]:\n text_batch_list = []\n text_batch = []\n nums_batch_list = []\n nums_batch = []\n count_texts = 0\n text = \"\"\n curr_doc = 0\n for n, doc in enumerate(docs_batch):\n sentences = sent_tokenize(doc)\n for sentence in sentences:\n if len(text) + len(sentence) < self.max_chunk_len and n == curr_doc:\n text += f\"{sentence} \"\n else:\n if count_texts < self.batch_size:\n text_batch.append(text.strip())\n if n == curr_doc:\n nums_batch.append(n)\n else:\n nums_batch.append(n - 1)\n count_texts += 1\n else:\n text_batch_list.append(text_batch)\n text_batch = []\n nums_batch_list.append(nums_batch)\n nums_batch = [n]\n count_texts = 0\n curr_doc = n\n text = f\"{sentence} \"\n\n if text:\n text_batch.append(text.strip())\n text_batch_list.append(text_batch)\n nums_batch.append(len(docs_batch) - 1)\n nums_batch_list.append(nums_batch)\n\n return text_batch_list, nums_batch_list", "def bag_of_words(batch, TEXT):\n V = len(TEXT.vocab)\n X = torch.zeros(batch.text.size(0), V)\n ones = torch.ones(batch.text.size(1))\n for b in range(batch.text.size(0)):\n X[b].index_add_(0, batch.text.data[b], ones)\n X[b][TEXT.vocab.stoi['<pad>']] = 0\n X = Variable(X, requires_grad=False)\n return X", "def batch_sentences(self, sentences, bos, eos, indices=None):\n batch_size = len(sentences)\n slen = max([len(s) for s in sentences])\n if bos:\n slen += 1\n if eos:\n slen += 1\n \n def pad_sent(s, max_len, bos, eos):\n ret = s\n if bos:\n ret = [self.bos_index] + ret\n if eos:\n ret = ret + [self.eos_index]\n ret = ret + [self.pad_index for _ in range(max_len - len(ret))]\n return ret\n\n sent_tensor = [pad_sent(s, slen, bos, eos) for s in sentences]\n sent_tensor = torch.from_numpy(np.array(sent_tensor)).long()\n \n if indices is None:\n return sent_tensor\n else:\n return sent_tensor, indices", "def yield_batches(self, texts):\n batch = []\n for text in self._iter_texts(texts):\n batch.append(text)\n if len(batch) == self.batch_size:\n yield batch\n batch = []\n\n if batch:\n yield batch", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n 
#self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def summarize(self, text, text_index, n):\r\n self.text_index = text_index\r\n sentences = sent_tokenize(text)\r\n if len(sentences) < n:\r\n raise ValueError(\"Cannot extract %s sentences from text with %s sentences\" % \\\r\n (n, len(sentences)))\r\n preprText = self.preprocess_document(text)\r\n words = self.word_tokenize_preprocessed(preprText)\r\n tfIdfTable = self._create_tf_idf_table(words)\r\n # print({k: v for k, v in sorted(freqTable.items(), key=lambda item: item[1], reverse=True)})\r\n sentenceScores = np.array(self._score_sentences(sentences, tfIdfTable))\r\n nBestIndexes = np.argpartition(sentenceScores, -n)[-n:] # indexes of sentences with n best scores\r\n nBestIndexes = sorted(nBestIndexes)\r\n\r\n summary = ''\r\n for index in nBestIndexes:\r\n summary += sentences[index] + \" \"\r\n\r\n self.text_index = None # reset text_index once completed\r\n return summary[:-1] # remove last space\r", "def summarize(self, text, n):\n sents = sent_tokenize(text)\n assert n <= len(sents)\n word_sent = [word_tokenize(s.lower()) for s in sents]\n self._freq = self._compute_frequencies(word_sent)\n ranking = defaultdict(int)\n for i,sent in enumerate(word_sent):\n for w in sent:\n if w in self._freq:\n ranking[i] += self._freq[w]\n sents_idx = self._rank(ranking, n)\n return [sents[j] for j in sents_idx]", "def universal_sentence_embedding(sentences, mask, sqrt=True):\n # need to mask out the padded chars\n sentence_sums = th.bmm(\n sentences.permute(0, 2, 1),\n mask.float().unsqueeze(-1)).squeeze(-1)\n divisor = mask.sum(dim=1).view(-1, 1).float()\n if sqrt:\n divisor = divisor.sqrt()\n sentence_sums /= divisor\n return sentence_sums", "def batchify_summary(batch):\r\n\r\n if type(batch[0][1]) != torch.LongTensor:\r\n no_elmo, use_char = (True, False) if batch[0][1] == -2 else (False, False)\r\n else:\r\n no_elmo, use_char = True, True\r\n\r\n docs = [ex[0] for ex in batch]\r\n docs_char = [ex[1] for ex in batch]\r\n summaries = [ex[2] for ex in batch]\r\n\r\n # Batch documents\r\n max_doc_length = max([d.size(0) for d in docs])\r\n x1_len = torch.LongTensor(len(docs)).zero_()\r\n x1 = torch.LongTensor(len(docs),\r\n max_doc_length).zero_() if no_elmo else torch.LongTensor(len(docs),\r\n max_doc_length,\r\n 50).zero_()\r\n x1_char = torch.LongTensor(len(docs),\r\n max_doc_length,\r\n docs_char[0].size(1)).zero_() if (no_elmo and use_char) else None\r\n for i, d in enumerate(docs):\r\n x1_len[i] = d.size(0)\r\n x1[i, :d.size(0)].copy_(d)\r\n if not no_elmo:\r\n x1_char[i, :d.size(0), :].copy_(docs_char[i])\r\n\r\n # Batch answers\r\n max_ans_length = max([a.size(0) for a in summaries])\r\n ans_len = torch.LongTensor(len(summaries)).zero_()\r\n ans = torch.LongTensor(len(summaries), max_ans_length).zero_()\r\n for i, a in enumerate(summaries):\r\n ans_len[i] = a.size(0)\r\n ans[i, :a.size(0)].copy_(a)\r\n\r\n ids = [ex[3] for ex in batch]\r\n contexts = [ex[4] for ex in batch]\r\n # FIXME: multiple answers are possible, fix vectorize also.\r\n targets = [ex[5] for ex in batch]\r\n src_vocabs = [ex[6] for ex in batch]\r\n source_maps = []\r\n alignments = []\r\n\r\n # Prepare source vocabs, alignment [required for Copy Attention]\r\n for eid, context, target, (token2idx, idx2token) in \\\r\n zip(ids, contexts, targets, src_vocabs):\r\n # Mapping source tokens to indices in the dynamic dict.\r\n src_map = torch.LongTensor([token2idx[w] for w in context])\r\n 
source_maps.append(src_map)\r\n\r\n # TODO: does skipping the first and last token in answer valid?\r\n mask = torch.LongTensor([token2idx[w] if w in token2idx\r\n else UNK for w in target])\r\n alignments.append(mask)\r\n\r\n return {'doc_rep': x1,\r\n 'doc_char_rep': x1_char,\r\n 'doc_len': x1_len,\r\n 'summ_rep': ans,\r\n 'summ_len': ans_len,\r\n 'ids': ids,\r\n 'documents': contexts,\r\n 'answers': targets,\r\n 'source_vocabs': src_vocabs,\r\n 'src_map': source_maps,\r\n 'alignment': alignments}", "def hf_summarizer(sentences):\n\n max_chunk = 512\n current_chunk = 0\n chunks = []\n\n for sentence in sentences:\n if len(chunks) == current_chunk +1 :\n if len(chunks[current_chunk]) + len(sentence.split()) <= max_chunk:\n chunks[current_chunk].extend(sentence.split())\n else:\n current_chunk += 1\n chunks.append(sentence.split())\n else:\n print(current_chunk)\n chunks.append(sentence.split())\n\n # print(chunks[0])\n\n for chunk_id in range(len(chunks)):\n chunks[chunk_id] = ' '.join(chunks[chunk_id])\n\n #print(len(chunks[0].split()))\n\n summarizer = pipeline(\"summarization\")\n summarized = summarizer(chunks, min_length = 50, max_length = 100, do_sample=False)\n\n text = ''.join([sum[\"summary_text\"] for sum in summarized])\n\n with open(\"static/files/book.txt\", \"w\",encoding=\"utf-8\") as f:\n f.write(text)\n \n return summarized", "def _score_sentence(self, feats, tags):\n score = torch.zeros((self.batch_size,1), device=self.device)\n tags = torch.cat([torch.full((self.batch_size, 1, 1), self.tag2idx[START_TAG], dtype=torch.long, device=self.device), tags],dim=1)\n for i in range(feats.shape[1]):\n feat = feats[:,i,:]\n \n score = score + self.transitions[tags[:,i+1], tags[:,i]] + feat.gather(dim=-1, index=tags[:,i+1])\n \n score = score + self.transitions[self.tag2idx[STOP_TAG], tags[:,-1]]\n\n return score", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def train(self, corpus):\n lastToken = \"#\"\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n self.reverseBigramCount[token][lastToken] += 1\n self.bigramCount[lastToken][token] += 1\n self.unigramCount[token] += 1\n self.total += 1\n lastToken = token", "def batch_sentences_v2(sentences, lm_labels=None):\n # sentences = sorted(sentences, key=lambda x: len(x), reverse=True)\n lengths = torch.LongTensor([len(s) + 2 for s in sentences])\n sent = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(1)\n if lm_labels is not None:\n _labels = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(-1)\n\n sent[0] = 0\n for i, s in enumerate(sentences):\n if lengths[i] > 2: # if sentence not empty\n sent[1:lengths[i] - 1, i].copy_(torch.from_numpy(s.astype(np.int64)))\n if lm_labels is not None:\n lm = np.array(lm_labels[i])\n _labels[1:lengths[i] - 1, i].copy_(torch.from_numpy(lm.astype(np.int64)))\n sent[lengths[i] - 1, i] = 2\n if lm_labels is not None:\n _labels[lengths[i] - 1, i] = -1\n\n if lm_labels is not None:\n return sent, lengths, _labels\n return sent, lengths", "def lstm_summarize(text, query, lstm_model, nn_model, stopwords, word_indices, limit = 250, remove_stop_words = True,with_txt_vect=False):\n if remove_stop_words : \n stopwords = stop_words()\n else :\n stopwords = []\n \n if with_txt_vect :\n text_vector = lstm_infer_vector(lstm_model, text, stopwords,word_indices)\n \n query_vector = lstm_infer_vector(lstm_model, query, stopwords,word_indices)\n \n 
summary = \"\"\n summary_vector = np.zeros(400)\n summary_idx = []\n \n sentences = text.split('.')\n sentences = np.asarray(sentences)\n \n remaining_sentences = copy.copy(sentences)\n \n size = 0\n counter = 0\n while size < limit and len(remaining_sentences)>0 :\n counter = counter+1\n scores = []\n for sentence in remaining_sentences :\n sentence_vector = lstm_infer_vector(lstm_model, sentence, stopwords,word_indices)\n if with_txt_vect :\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector, text_vector])\n else:\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector])\n nn_input = np.asarray([nn_input]) # weird but it is important to do it\n score = nn_model.predict(nn_input) \n scores.append(score)\n #print(scores)\n max_idx_rem = int(np.argmax(scores))\n idx_selected_sentence = np.arange(len(sentences))[sentences == remaining_sentences[max_idx_rem]]\n idx_selected_sentence = int(idx_selected_sentence[0])\n size += len(remaining_sentences[max_idx_rem].split())\n \n remaining_sentences = list(remaining_sentences)\n del remaining_sentences[max_idx_rem]\n bisect.insort_left(summary_idx,idx_selected_sentence)\n\n summary = \"\"\n\n for idx in summary_idx:\n summary = summary + \" \" + sentences[idx]\n\n summary_vector = lstm_infer_vector(lstm_model, summary, stopwords,word_indices)\n\n return summary", "def queue_all_texts(self, q, texts, window_size):\n for batch_num, batch in enumerate(self.yield_batches(texts)):\n q.put(batch, block=True)\n before = self._num_docs / self.log_every\n self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)\n if before < (self._num_docs / self.log_every):\n logger.info(\n \"%d batches submitted to accumulate stats from %d documents (%d virtual)\",\n (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def train(self, corpus): \n for sentence in corpus.corpus:\n prev_word = None\n for datum in sentence.data:\n word = datum.word\n self.unigram_count[word] += 1\n if prev_word != None:\n self.bigram_count[prev_word][word] += 1\n prev_word = word\n \n self.vocabulary_size = len(self.unigram_count)\n self.num_words = sum(self.unigram_count.values())", "def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)", "def fit(self, text):\n\n if self.lowercase:\n text = text.lower()\n\n print(\"Tokenize sentences...\")\n tokens = word_tokenize(text)\n\n self.words_set_size = len(set(tokens))\n\n print(\"Collecting of ngram counters...\")\n\n self.unigram_counts = Counter(tokens)\n self.bigram_counts = Counter(bigrams(tokens))\n\n return self", "def processCorpus(self, texts: [Text]):\n \n remaining_texts = texts\n curr_texts = []\n processed_texts = []\n cases = []\n num_failed_test_cases = []\n num_failed_test_cases_per_asr = {}\n num_processed_texts = []\n for asr in self.asrs:\n num_failed_test_cases_per_asr[asr.getName()] = []\n \n for i in range(self.num_iteration):\n # print(f\"Iteration: {i+1}\")\n \n if self.text_batch_size :\n curr_texts = remaining_texts[:self.text_batch_size]\n remaining_texts = remaining_texts[self.text_batch_size:]\n else : # use global visibility\n curr_texts = remaining_texts\n\n if len(curr_texts) > 0 :\n \n curr_cases, curr_processsed_texts, unprocessed_texts = self.processOneIteration(curr_texts, processed_texts, cases)\n cases.extend(curr_cases)\n processed_texts.extend(curr_processsed_texts)\n if 
self.text_batch_size :\n remaining_texts.extend(unprocessed_texts)\n else :\n remaining_texts = unprocessed_texts\n\n num_failed_test_cases.append(calculate_cases(cases, mode=FAILED_TEST_CASE))\n for asr in self.asrs:\n num_failed_test_cases_per_asr[asr.getName()].append(calculate_cases_per_asr(\n cases, mode=FAILED_TEST_CASE, asr_name=asr.getName()))\n num_processed_texts.append(len(processed_texts))\n else :\n print(\"Texts are not enough!\")\n \n # shuffle the remaining texts\n np.random.shuffle(remaining_texts)\n \n data = {}\n data[\"number_of_failed_test_cases_all\"] = num_failed_test_cases\n data[\"number_of_failed_test_cases_per_asr\"] = num_failed_test_cases_per_asr\n data[\"number_of_processed_texts\"] = num_processed_texts\n with open(self.outputfile_failed_test_case, 'w') as outfile:\n json.dump(data, outfile, indent=2, sort_keys=True)\n\n if self.target_asr :\n self.saveFailedTestCases(processed_texts, cases)", "def get_whole_and_per_sentence_flair_sentiments(list_of_comments):\n\n for comment in list_of_comments:\n result_sum = get_whole_flair_sentiment(comment)\n print(comment)\n print('Whole comment sentiment:', result_sum)\n print()\n sentence_score_list = get_sentence_sentiments(comment)\n print(comment)\n print('per sentence sentiment:', sentence_score_list)\n print()", "def batchify(self, i, iterator):\n print(f'Starting Batch {i}')\n iterator = [item.strip() for item in iterator]\n max_length = self.max_seq_length - 2 # for special tokens\n\n batches = []\n n = len(iterator)\n sentence_count = 0\n index_start = 0\n index_stop = 0\n\n while index_stop < n:\n if (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length):\n index_start += 1\n index_stop += 1\n while (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length) and (index_stop<n):\n index_stop += 1\n batches.append(iterator[index_start:index_stop])\n index_start = index_stop\n print(f'Batch {i} Done')\n return batches", "def _raw_word_count(self, job):\n return sum(len(sentence.words) for sentence in job)", "def foreach_sentence(layer: Model, drop_factor: float = 1.0) -> Model:\n\n def sentence_fwd(docs: List[Doc], drop: Dropout = 0.0) -> Tuple[Acts, Callable]:\n if not all(doc.is_sentenced for doc in docs):\n return layer.begin_update([d[:] for d in docs], drop=drop)\n sents = flatten_list([list(doc.sents) for doc in docs])\n words_per_doc = [len(d._.get(ATTRS.word_pieces)) for d in docs]\n words_per_sent = [len(s._.get(ATTRS.word_pieces)) for s in sents]\n sents_per_doc = [len(list(d.sents)) for d in docs]\n assert sum(words_per_doc) == sum(words_per_sent)\n acts, bp_acts = layer.begin_update(sents, drop=drop)\n # To go from \"per sentence\" activations to \"per doc\" activations, we\n # just have to tell it where the sequences end.\n acts.lh.lengths = words_per_doc\n acts.po.lengths = sents_per_doc\n\n def sentence_bwd(d_acts: Acts, sgd: Optional[Optimizer] = None) -> None:\n assert isinstance(d_acts, Acts)\n # Translate back to the per-sentence activations\n if d_acts.has_lh:\n assert d_acts.lh.data.shape[0] == sum(d_acts.lh.lengths)\n assert d_acts.lh.lengths == words_per_doc\n d_acts.lh.lengths = words_per_sent\n d_acts.po.lengths = [1 for _ in words_per_sent]\n d_ids = bp_acts(d_acts, sgd=sgd)\n if not (d_ids is None or all(ds is None for ds in d_ids)):\n raise ValueError(\"Expected gradient of sentence to be None\")\n return d_ids\n\n return acts, sentence_bwd\n\n return wrap(sentence_fwd, layer)", "def batch_sentences(sentences, 
lg_ids=None):\n # sentences = sorted(sentences, key=lambda x: len(x), reverse=True)\n lengths = torch.LongTensor([len(s) + 2 for s in sentences])\n sent = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(1)\n if lg_ids is not None:\n lgs = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(4)\n else:\n lgs = None\n sent[0] = 0\n for i, s in enumerate(sentences):\n if lengths[i] > 2: # if sentence not empty\n sent[1:lengths[i] - 1, i].copy_(torch.from_numpy(s.astype(np.int64)))\n sent[lengths[i] - 1, i] = 2\n if lg_ids is not None:\n lgs[:, i] = lg_ids[i]\n\n if lgs is None:\n return sent, lengths\n return sent, lengths, lgs", "def summarize(self, text, n=5):\n sents = sent_tokenize(text)\n assert n <= len(sents)\n word_sent = [word_tokenize(s.lower()) for s in sents]\n self._freq = self._compute_frequencies(word_sent)\n ranking = defaultdict(int)\n for i, sent in enumerate(word_sent):\n for w in sent:\n if w in self._freq:\n ranking[i] += self._freq[w]\n sents_idx = self._rank(ranking, n)\n return [sents[j] for j in sents_idx]", "def train(self, corpus):\n\n\n temp = \"\"\n for sentence in corpus.corpus:\n\n i = 0\n for datum in sentence.data:\n # print str(sentence.data)\n self.total=self.total+1\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n if (i == 0):\n temp = datum.word\n i = i + 1\n continue\n\n i = i + 1\n\n key = temp + \",\" + token\n self.bigramCounts[key] = self.bigramCounts[key] + 1\n # print token\n temp = token\n\n pass", "def bag_of_words(tokenized_sentence, all_words):\n\n tokenized_sentence = [stem(w) for w in tokenized_sentence]\n #print(tokenized_sentence)\n bag = np.zeros_like(all_words, dtype=np.float32)\n for idx, w in enumerate(all_words):\n if w in tokenized_sentence:\n bag[idx] = 1.0\n\n return bag", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def _batch_thm_embedding(self, thms: List[Text]) -> List[THM_EMB_TYPE]:\n # The checkpoint should have exactly one value in this collection.\n thms = self._thm_string_for_predictions(thms)\n embeddings = self._sess.run(\n fetches=self._graph.get_collection('thm_net'),\n feed_dict={self._graph.get_collection('thm_string')[0]: thms})[0]\n return embeddings", "def _aggregate_text_embedding(self, token_ids, embeddings):\n if self._hparams['obj_text_aggregation'] == 'max':\n # Find valid tokens (not PADDING/EOS/UNK/START).\n valid_token_mask = tf.greater_equal(token_ids, 4)\n # Use large negative bias for invalid tokens.\n invalid_token_bias = tf.cast(\n tf.logical_not(valid_token_mask), tf.float32) * -1e9\n # [batch, node_num, word_num, hidden_size]\n embeddings = embeddings + tf.expand_dims(invalid_token_bias, axis=-1)\n # Max value for each dimension, [batch, node_num, hidden_size].\n embeddings = tf.reduce_max(embeddings, axis=-2)\n # For objects with no text, use 0.\n valid_object_mask = tf.cast(\n tf.reduce_any(valid_token_mask, axis=-1), tf.float32)\n embeddings = embeddings * tf.expand_dims(valid_object_mask, axis=-1)\n\n elif self._hparams['obj_text_aggregation'] == 'sum':\n # [batch, step, #max_obj, #max_token] 0 for padded tokens\n real_objects = tf.cast(tf.greater_equal(token_ids, 4), tf.float32)\n # [batch, step, #max_obj, hidden] 0s for padded objects\n embeddings = tf.reduce_sum(\n input_tensor=embeddings * tf.expand_dims(real_objects, 3), axis=-2)\n\n else:\n raise ValueError('Unrecognized token aggregation %s' %\n 
(self._hparams['obj_text_aggregation']))\n return embeddings", "def train(self, n):\n t = self.t\n\n parallel_sentences = list(zip(self.target,self.source))\n\n for i in range(n):\n\n count = defaultdict(lambda:defaultdict(int))\n s_total = dict()\n total = defaultdict(int)\n\n for E,F in parallel_sentences:\n # compute normalization\n for e in E:\n t_e = t[e]\n s_total[e] = 0\n for f in F:\n s_total[e] += t_e[f]\n\n # collect counts\n for e in E:\n count_e = count[e]\n t_e = t[e]\n s_total_e = s_total[e]\n for f in F:\n tmp = t_e[f] / s_total_e\n count_e[f] += tmp\n total[f] += tmp\n\n # estimate probabilities\n for e in self.t_words:\n t_e = t[e]\n count_e = count[e]\n #for f in self.s_words:\n for f in count_e:\n #if f not in count[e]: continue\n t_e[f] = count_e[f] / total[f]", "def countMutatedSentences(sentence):\n # BEGIN_YOUR_ANSWER (our solution is 17 lines of code, but don't worry if you deviate from this)\n words = sentence.split()\n distinct_words = set(words)\n\n pairs = {word: set() for word in distinct_words}\n cache = [{j: None for j in distinct_words} for i in range(len(words))]\n\n for cur_word, next_word in zip(words[:-1], words[1:]):\n pairs[cur_word].add(next_word)\n\n def count_mutate_sentences(target_word: str, current_length: int, target_length: int):\n if current_length == target_length:\n return 1\n else:\n result = 0\n for word in pairs[target_word]:\n if cache[current_length - 1][word] is None:\n cache[current_length - 1][word] = count_mutate_sentences(word, current_length + 1, target_length)\n result += cache[current_length - 1][word]\n\n return result\n\n return sum([count_mutate_sentences(word, 1, len(words)) for word in distinct_words])\n # END_YOUR_ANSWER", "def summarize(text, query, d2v_model, nn_model, limit = 250, remove_stop_words = True,with_txt_vect=False):\n if remove_stop_words : \n stopwords = stop_words()\n else :\n stopwords = []\n \n if with_txt_vect :\n text_prep = gensim.utils.simple_preprocess(text, deacc=True)\n text_vector = d2v_model.infer_vector(remove_stopwords(text_prep,stopwords))\n\n \n query_prep = gensim.utils.simple_preprocess(query, deacc=True)\n query_vector = d2v_model.infer_vector(remove_stopwords(query_prep,stopwords))\n \n summary = \"\"\n summary_vector = d2v_model.infer_vector([\"\"])\n summary_idx = []\n \n sentences = text.split('.')\n sentences = np.asarray(sentences)\n \n remaining_sentences = copy.copy(sentences)\n \n size = 0\n counter = 0\n while size < limit and len(remaining_sentences)>0 :\n counter = counter+1\n scores = []\n for sentence in remaining_sentences :\n sentence_prep = gensim.utils.simple_preprocess(sentence, deacc=True)\n sentence_vector = d2v_model.infer_vector(sentence_prep)\n if with_txt_vect :\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector, text_vector])\n else:\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector])\n nn_input = np.asarray([nn_input]) # weird but it is important to do it\n score = nn_model.predict(nn_input) \n scores.append(score)\n #print(scores)\n max_idx_rem = int(np.argmax(scores))\n idx_selected_sentence = np.arange(len(sentences))[sentences == remaining_sentences[max_idx_rem]]\n idx_selected_sentence = int(idx_selected_sentence[0])\n size += len(remaining_sentences[max_idx_rem].split())\n \n remaining_sentences = list(remaining_sentences)\n del remaining_sentences[max_idx_rem]\n bisect.insort_left(summary_idx,idx_selected_sentence)\n\n summary = \"\"\n\n for idx in summary_idx:\n summary = summary + \" \" + sentences[idx]\n\n 
summary_prep = gensim.utils.simple_preprocess(summary, deacc=True)\n summary_vector = d2v_model.infer_vector(summary_prep)\n\n return summary", "def evaluate(self, text):\n text = ' '.join(['<'] * (self.n - 1) + [text.replace(' . ', ' .%s ' % (' <' * (self.n - 1)))])\n tokens = self.split(text)\n sum = 0\n for i in range(len(tokens) - self.n + 1):\n n_gram = self.join(tokens[i: i + self.n])\n prob = self.get_probability(n_gram, True)\n sum += math.log(prob, self.log_base)\n return sum", "def batch_split(self, batch_text, threads=8):\n pass", "def average_length(sentences, padding_word=\"PAD\"):\n global trainset_average_length\n number_of_all = 0\n sum = 0\n averaged_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n sum = sum + len(sentence)\n number_of_all = number_of_all + 1\n average = int(sum / number_of_all)\n average = 35572\n trainset_average_length = average\n for i in range(len(sentences)):\n sentence = sentences[i]\n if len(sentence) < average:\n num_padding = average - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n averaged_sentences.append(new_sentence)\n elif len(sentence) > average:\n new_sentence = sentence[:average]\n averaged_sentences.append(new_sentence)\n else:\n averaged_sentences.append(sentence)\n print('Average Length is: ' + str(average))\n return averaged_sentences", "def sent_to_words(self, sentences):\n\n for sentence in sentences:\n yield(gensim.utils.simple_preprocess(str(sentence)))", "def generate_corpus(self, text):\n if isinstance(text, str):\n sentences = self.sentence_split(text)\n else:\n sentences = []\n for line in text:\n sentences += self.sentence_split(line)\n passing = filter(self.test_sentence_input, sentences)\n runs = map(self.word_split, passing)\n return runs", "def total_chunks(self) -> global___Expression:", "def _sum_over_dicts(total_n_grams: Dict[int, Tensor], n_grams: Dict[int, Tensor]) ->Dict[int, Tensor]:\n for n in n_grams:\n total_n_grams[n] += n_grams[n]\n return total_n_grams", "def train(self, corpus): \n # TODO your code here\n \n for sentence in corpus.corpus:\n for i,dotum in enumerate(sentence.data[1:]):\n self.vocab[dotum.word][sentence.data[i].word] +=1\n self.word_counts[sentence.data[i].word] +=1\n self.total +=1\n self.v = len(self.vocab.keys())", "def total_predict_batches(self) -> int:\n return sum(self.trainer.num_predict_batches)", "def normalize(self):\n for key in self.corpus.keys():\n sum_count = 0\n words = []\n counts = []\n for k, v in self.corpus[key].items():\n sum_count += v\n words.append(k)\n counts.append(v)\n prob = [float(count)/sum_count for count in counts]\n\n self.corpus[key] = [words, prob]", "def collate_sentences(batch: List[Tuple]):\n # fill this list with all the labels in the batch\n batch_labels = []\n\n # we need to find the maximum length of a sentence in this batch\n max_len = 0\n for i in batch:\n if len(i[0]) > max_len:\n max_len = len(i[0])\n batch_size = len(batch)\n\n # print('batch size',batch_size)\n # initialize a Tensor filled with zeros (aka index of <PAD>)\n batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)\n\n # fill each row idx in batch_sentences with the corresponding\n # sequence tensor\n #\n # ... batch_sentences[idx, ...] 
= ...\n for idx in range(0, batch_size):\n # print(idx)\n # print(len(batch[idx][0]))\n # print(len(batch_sentences[idx]))\n batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]\n print(batch[idx])\n batch_labels.append(batch[idx][1])\n # print(batch_sentences[idx])\n print(type(batch_labels))\n # batch_labels = [torch.LongTensor(x) for x in batch_labels]\n batch_labels = torch.tensor(batch_labels)\n # print(batch_labels)\n return batch_sentences, batch_labels", "def train(self, corpus): \n\n # Generate all possible n-grams\n # for every sentence in the corpus\n for sentence in corpus:\n\n #for every possible gram-length in the sentence\n for gramlength in xrange(1,len(sentence)):\n\n #iterate through all possible grams of that gramlength\n for i in xrange(len(sentence) - gramlength):\n\n #generate tuple\n key = ();\n for index in xrange(gramlength):\n key += (sentence[i + index],);\n\n if(gramlength == 2):\n self.continuationProb[key[1]].add(key[0]);\n\n self.ngramCounts[key] += 1;\n\n self.total = len(set(map(lambda tup: tup[0], self.ngramCounts)));", "def anlSentence(self, sentence):\n cleanStr = re.sub(self._wrdSeps, \" \",\n re.sub(self._stcSeps, \"\", sentence))\n for word in cleanStr.split():\n self._wordCounter[word] += 1\n self._totalWords += 1\n else:\n self._totalSentences += 1", "def train_sentence_dm(model, sentence, lbls, alpha, work=None, neu1=None, train_words=True, train_lbls=True):\n lbl_indices = [lbl.index for lbl in lbls if lbl is not None]\n lbl_sum = np_sum(model.syn0[lbl_indices], axis=0)\n lbl_len = len(lbl_indices)\n neg_labels = []\n if model.negative:\n # precompute negative labels\n neg_labels = zeros(model.negative + 1)\n neg_labels[0] = 1.\n\n for pos, word in enumerate(sentence):\n if word is None:\n continue # OOV word in the input sentence => skip\n reduced_window = random.randint(model.window) # `b` in the original doc2vec code\n start = max(0, pos - model.window + reduced_window)\n window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)\n word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]\n l1 = np_sum(model.syn0[word2_indices], axis=0) + lbl_sum # 1 x layer1_size\n if word2_indices and model.cbow_mean:\n l1 /= (len(word2_indices) + lbl_len)\n neu1e = train_cbow_pair(model, word, word2_indices, l1, alpha, neg_labels, train_words, train_words)\n if train_lbls:\n model.syn0[lbl_indices] += neu1e\n\n return len([word for word in sentence if word is not None])", "def __init__(self,sentences):\n self.data_set = sentences\n self.sum1=0\n for sentence in self.data_set:\n sentence.weight = 1/len(self.data_set)\n self.sum1 += sentence.weight\n\n self.sum2=1", "def _score_sentences(self, sentences, tfIdfTable) -> dict:\r\n sentenceScores = []\r\n sentIndex = 0\r\n\r\n for sentence in sentences:\r\n words = set(self.word_tokenize_preprocessed(sentence))\r\n sentScore = 0\r\n for word in words:\r\n if word in tfIdfTable:\r\n sentScore += tfIdfTable[word]\r\n\r\n if len(words) > 0:\r\n sentScore /= len(words)\r\n sentenceScores.append(sentScore)\r\n sentIndex += 1\r\n\r\n return sentenceScores", "def train():\n counts = {size: dict() for size in NGRAM_SIZES}\n for word in tqdm.tqdm(word_iterator(\"resources/datasets\")):\n if word == \"\":\n continue\n for size in NGRAM_SIZES:\n for token in ngrams(word, 2 * size):\n left, right = token[:size], token[size:]\n counts[size].setdefault(left, dict())\n counts[size][left].setdefault(right, 0)\n counts[size][left][right] += 1\n model = 
{size: dict() for size in NGRAM_SIZES}\n for size in NGRAM_SIZES:\n for left in counts[size]:\n total = sum(counts[size][left].values())\n model[size][left] = dict()\n for right in counts[size][left]:\n model[size][left][right] = math.log(\n counts[size][left][right] / total)\n with open(MODEL_FILENAME, \"wb\") as file:\n pickle.dump(model, file)", "def total_estimated_words(self):\n return len(self.sentence) / 5", "def get_sentiment(self, sentances):\n sentiment_total = 0\n # Add each sentances combined sentiment to a total tally\n for sentance in sentances:\n sentiment = self.sentiment_analyzer.polarity_scores(sentance)\n sentiment_total += sentiment['compound']\n return sentiment_total / len(sentances)", "def __call__(self, batch_docs: List[Union[str, List[str]]]) -> \\\n List[Union[List[str], List[List[str]]]]:\n\n result = []\n\n for docs in batch_docs:\n batch_chunks = []\n if isinstance(docs, str):\n docs = [docs]\n for doc in docs:\n if self.paragraphs:\n split_doc = doc.split('\\n\\n')\n split_doc = [sd.strip() for sd in split_doc]\n split_doc = list(filter(lambda x: len(x) > 40, split_doc))\n batch_chunks.append(split_doc)\n else:\n doc_chunks = []\n if self.keep_sentences:\n sentences = sent_tokenize(doc)\n n_tokens = 0\n keep = []\n for s in sentences:\n n_tokens += len(s.split())\n if n_tokens > self.tokens_limit:\n if keep:\n doc_chunks.append(' '.join(keep))\n n_tokens = 0\n keep.clear()\n keep.append(s)\n if keep:\n doc_chunks.append(' '.join(keep))\n batch_chunks.append(doc_chunks)\n else:\n split_doc = doc.split()\n doc_chunks = [split_doc[i:i + self.tokens_limit] for i in\n range(0, len(split_doc), self.tokens_limit)]\n batch_chunks.append(doc_chunks)\n result.append(batch_chunks)\n\n if self.flatten_result:\n if isinstance(result[0][0], list):\n for i in range(len(result)):\n flattened = list(chain.from_iterable(result[i]))\n result[i] = flattened\n\n return result", "def averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))", "def inline_sum(summands, seed):\n for r in summands:\n seed += r\n return seed", "def inline_sum(summands, seed):\n for r in summands:\n seed += r\n return seed", "def full_ne_list_and_pos_amount(self):\n #open file\n with open(self.lang + '.txt') as file:\n for paragraph in file:\n sentences = tokenize.sent_tokenize(paragraph)\n for sentence in sentences:\n #instance of the named_entity_methods_sentence class\n inst = named_entity_methods_sentence(sentence, self.lang)\n #save into a list all NEs of the text and update the total\n #number of nouns and numerals\n if self.method == 'stanford':\n self.named_entity_list_total.append(inst.named_entity_list_stanford_nlp())\n self.amount_nouns_and_num_total += inst.amount_nouns_and_numerals_stanford_nlp()\n elif self.method == 'spacy':\n self.named_entity_list_total.append(inst.named_entity_list_spacy())\n self.amount_nouns_and_num_total += inst.amount_nouns_and_numerals_spacy()\n return", "def batch_apply(self, batch, is_train=False, stats=None, **kwargs):\n if self.max_context == 0:\n return batch\n trf_batch = []\n doc = {}\n doc[\"src\"] = []\n doc[\"tgt\"] = []\n doc[\"indices\"] = 0\n\n for ex, _, cid in batch:\n if ex[\"tgt\"] is not None:\n cur_len = max(len(doc[\"src\"] + ex[\"src\"]), len(doc[\"tgt\"] + 
ex[\"tgt\"]))\n\n if len(ex[\"src\"]) == 0 and len(ex[\"tgt\"]) == 0:\n # doc break we add it, restart new doc\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"tgt\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n elif cur_len > self.doc_length:\n if len(doc[\"src\"]) == 0:\n # case 1st ex is already longer\n trf_batch.append((ex, self, cid))\n else:\n # adding cur ex is too long we add cur doc\n # and reset doc to cur ex\n trf_batch.append((doc, self, cid))\n doc = copy.deepcopy(ex)\n else:\n if len(doc[\"src\"]) == 0:\n # we start the new doc with cur ex\n doc = copy.deepcopy(ex)\n else:\n # we cumulate cur ex to cur doc\n doc[\"src\"] += [DefaultTokens.SEP] + ex[\"src\"]\n doc[\"src_original\"] += [DefaultTokens.SEP] + ex[\"src_original\"]\n doc[\"tgt\"] += [DefaultTokens.SEP] + ex[\"tgt\"]\n doc[\"tgt_original\"] += [DefaultTokens.SEP] + ex[\"tgt_original\"]\n nb_ctx = doc[\"src\"].count(DefaultTokens.SEP)\n if nb_ctx >= self.max_context:\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"tgt\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n else:\n cur_len = len(doc[\"src\"] + ex[\"src\"])\n doc[\"tgt\"] = None\n if len(ex[\"src\"]) == 0:\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n elif cur_len > self.doc_length:\n if len(doc[\"src\"]) == 0:\n trf_batch.append((ex, self, cid))\n else:\n trf_batch.append((doc, self, cid))\n doc = copy.deepcopy(ex)\n else:\n if len(doc[\"src\"]) == 0:\n doc = copy.deepcopy(ex)\n else:\n doc[\"src\"] += [DefaultTokens.SEP] + ex[\"src\"]\n doc[\"src_original\"] += [DefaultTokens.SEP] + ex[\"src_original\"]\n nb_ctx = doc[\"src\"].count(DefaultTokens.SEP)\n if nb_ctx >= self.max_context:\n trf_batch.append((doc, self, cid))\n doc = {}\n doc[\"src\"] = []\n doc[\"indices\"] = ex[\"indices\"]\n if len(doc[\"src\"]) > 0:\n trf_batch.append((doc, self, cid))\n return trf_batch", "def compute(self, batch: Dataset) -> List[TaggingResponse]: # type: ignore\n syntax_options: SyntaxOptions = assert_not_none(self.config.syntax)\n spacy_model = spacy.load(syntax_options.spacy_model)\n\n utterances = batch[self.config.columns.text_input]\n records: List[TaggingResponse] = []\n\n for utterance in utterances:\n tag: Dict[Tag, bool] = {\n smart_tag: False\n for family in [SmartTagFamily.extreme_length, SmartTagFamily.partial_syntax]\n for smart_tag in SMART_TAGS_FAMILY_MAPPING[family]\n }\n\n doc = spacy_model(clean_utterance(utterance))\n # Remove punctuation for word count and smart tags\n tokens = [token.text for token in doc if not token.is_punct]\n\n if len(tokens) >= syntax_options.long_utterance_min_word:\n tag[SmartTag.long] = True\n if len(tokens) <= syntax_options.short_utterance_max_word:\n tag[SmartTag.short] = True\n\n sub_toks = [tok for tok in doc if (tok.dep_ in syntax_options.subj_tags)]\n obj_toks = [tok for tok in doc if (tok.dep_ in syntax_options.obj_tags)]\n vrb_toks = [tok for tok in doc if (tok.pos_ in self.verb_tags)]\n if not sub_toks:\n tag[SmartTag.no_subj] = True\n if not obj_toks:\n tag[SmartTag.no_obj] = True\n if not vrb_toks:\n tag[SmartTag.no_verb] = True\n\n # Some issues occur with other languages such as french if using doc.sents directly.\n # Hence, we use an English sentencizer that seems to work better for similar languages.\n doc_sentencizer_en = self.spacy_sentencizer_en(clean_utterance(utterance))\n sentence_count = len(list(doc_sentencizer_en.sents))\n if sentence_count > 1:\n tag[SmartTag.multi_sent] = True\n\n adds = 
{DatasetColumn.word_count: len(tokens)}\n records.append(TaggingResponse(tags=tag, adds=adds))\n\n return records", "def sentiment_score(review):\n return sum([sentence_score(sentence, None, 0.0) for sentence in review])", "def sumoflemmas():\n\n wordnet_length= 74374\n\n wordnet_occurrences = 94949 #not unique words\n \"\"\"stepdown = wn.synsets('entity')[0]\n synsets = downtree.downtree(stepdown, [])\n synsets.append(stepdown)\n synsets = set(synsets)\n #wordnet_length = len(set(synsets))\n\n nameset =[]\n #fdist = FreqDist(brown.words())\n for syn in synsets:\n for lem in syn.lemmas():\n nameset.append(lem.count())\n #nameset.append(lem.name())\n\n # for wh in set(nameset):\n # wordnet_occurrences.append(fdist[wh])\n\n # Should give set of numbers, with which to sum\n\n # wordnet_occurrences = sum(wordnet_occurences)\n\n # Not sure why this returns 105000, seems like a reasonable number,\n # For example, 'dog' returns 70 instead of 42. Perhaps it uses a different\n # percentage of the wordnet corpus. Or was counted wrong. Or this one was,\n # Either way, my understanding is that since we are doing a probability with it,\n # the actual number shouldn't matter too much at these ranges, as long as both the\n # numerator and the denominator are done using the same method.\n\n\n wordnet_occurrences = sum(nameset)\"\"\"\n\n return wordnet_occurrences", "def calc_e_final(all_et_lst, size_of_batch):\r\n e_final_lst = []\r\n\r\n for i in range(len(all_et_lst[0])): # For each index of the Etotal list\r\n et_sum = 0 # Sum of ETotal values with same index\r\n for lst in all_et_lst: # For each Etotal list\r\n et_sum += lst[i]\r\n\r\n e_final = (1/size_of_batch) * et_sum\r\n e_final_lst.append(e_final)\r\n\r\n return e_final_lst", "def score(self, sentence):\n score = 0.0\n last_token = None\n for token in sentence:\n if not last_token:\n last_token = token\n continue\n tup = (last_token, token)\n if tup in self.counts:\n score += self.s[tup]\n else: # stupid backoff to add-one smoothed unigram\n if self.s[token]: score += self.s[token]\n else: score += math.log(1.0 * (self.counts[token] + 1) / (self.ntokens * 2))\n last_token = token\n return score", "def em_step(t, eng, fre):\n\t# TODO\n tcount = {}\n total = {}\n for word_eng in t:\n total[word_eng] = 0\n for word_fre in t[word_eng]:\n t[word_eng][word_fre] = 0\n num_sentences = len(eng)\n for i in range(num_sentences):\n list_eng = eng[i].split(\" \")\n list_fre = fre[i].split(\" \")\n for word_fre in set(list_fre):\n denom_c = 0\n for word_eng in set(list_eng):\n denom_c += t[word_eng][word_fre]*list_fre.count(word_fre)\n for word_eng in set(list_eng):\n tcount[word_eng][word_fre] += t[word_eng][word_fre]*list_fre.count(word_fre)*list_eng(word_eng)/denom_c\n total[word_eng] = t[word_eng][word_fre]*list_fre.count(word_fre)*list_eng(word_eng)/denom_c\n for word_eng in total:\n for word_fre in tcound[word_eng]:\n t[word_eng][word_fre] = tcound[word_eng][word_fre]/total[word_eng]", "def gen_bag_of_words_df(self):\n\t\tdef word_vector(doc_text):\n\t\t\tfreqs = pd.Series(collections.Counter(doc_text.split()))\n\t\t\treturn freqs.loc[set(freqs.index.values)|set(self.stems)]\n\t\tself.bagofwords = self.dataframe.text.apply(word_vector).replace({np.nan:0})", "def concatenate_processed_text(self):\n\n\n\t\tconcatenated_text = \"\"\n\t\tfor line in self.processed_text:\n\t\t\tconcatenated_text += \" \".join(line) + \" \"\n\n\n\t\t# Remove the trailing space character from the concatenated string\n\t\t# of words.\n\t\tconcatenated_text = 
concatenated_text[:-1]\n\n\t\tself.concatenated_text = concatenated_text", "def batchify(self, observations):\n # valid examples\n exs = [ex for ex in observations if 'text' in ex]\n # the indices of the valid (non-empty) tensors\n valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]\n\n # set up the input tensors\n batchsize = len(exs)\n if batchsize == 0:\n return None, None, None\n # tokenize the text\n parsed_x = [deque(maxlen=self.truncate) for _ in exs]\n for dq, ex in zip(parsed_x, exs):\n dq += self.parse(ex['text'])\n # parsed = [self.parse(ex['text']) for ex in exs]\n max_x_len = max((len(x) for x in parsed_x))\n for x in parsed_x:\n # left pad with zeros\n x.extendleft([self.fairseq_dict.pad()] * (max_x_len - len(x)))\n xs = torch.LongTensor(parsed_x)\n\n # set up the target tensors\n ys = None\n if 'labels' in exs[0]:\n # randomly select one of the labels to update on, if multiple\n labels = [random.choice(ex.get('labels', [''])) for ex in exs]\n parsed_y = [deque(maxlen=self.truncate) for _ in labels]\n for dq, y in zip(parsed_y, labels):\n dq.extendleft(reversed(self.parse(y)))\n for y in parsed_y:\n y.append(self.fairseq_dict.eos())\n # append EOS to each label\n max_y_len = max(len(y) for y in parsed_y)\n for y in parsed_y:\n y += [self.fairseq_dict.pad()] * (max_y_len - len(y))\n ys = torch.LongTensor(parsed_y)\n return xs, ys, valid_inds", "def _merge_embeddings(self, hidden_states:List[List[float]], indices_subwords:List[List[int]]):\n embed_output = []\n # ignore the first and the last tokens which are respectively the [CLS] and [SEP] tokens\n hidden_states = hidden_states[1:-1 ,:]\n sentence_output = []\n for indices_to_merge in indices_subwords:\n # average the embeddings of the subwords of a word \n sentence_output.append(torch.mean(hidden_states[indices_to_merge], axis=0))\n embed_output.append(torch.stack(sentence_output).to(self.device))\n return embed_output", "def partial_accumulate(self, texts, window_size):\n self._current_doc_num = -1\n self._token_at_edge = None\n self._counter.clear()\n\n super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)\n for combo, count in iteritems(self._counter):\n self._co_occurrences[combo] += count\n\n return self", "def generate_words_greedily(self, model, session, X, words_to_idx):\n \n Xorig_clean = self.cleanOutput(X, words_to_idx)\n \n for i in range(len(X)):#iterate over allscentences\n #set eos pointer to eos index\n p_eos = np.argwhere(np.array(X[i])==words_to_idx['<eos>'])[0][0] # 2 is eos but would be better using the dict\n while True:\n #compute predictions\n feed_dict = {self.input_x: np.array(X[i]).reshape((1,29)),\n self.input_y: np.array(X[i]).reshape((1,29))} # input_y is not needed\n \n prediction, sentence_probability = session.run([self.predictions, self.sentence_probability], feed_dict)\n \n lastpred = prediction[0,p_eos-1]\n X[i][p_eos]=lastpred\n \n p_eos += 1\n if lastpred == words_to_idx['<eos>'] or p_eos==29: break\n \n #postprocess X\n Xclean = self.cleanOutput(X, words_to_idx)\n self.create_submission_file(Xorig_clean, task='originalX')\n self.create_submission_file(Xclean, task='continuation')", "def sentences(summary, nlp):\n text = remove_spurious_words(text_of(summary))\n all_sentence = [sentence for sentence in re.split(\"[。,?!\\n]\", text) if sentence]\n all_sentence = [re.sub('[ ]+', ' ', sentence.encode('gb2312', 'ignore').decode('gb2312')).strip() for sentence in\n all_sentence]\n return [nlp.ner(sentence) for sentence in all_sentence if sentence]", "def 
count_words_sents(self, doc_array):\n total_num_sents = []\n total_num_words = []\n for doc in doc_array:\n sents = sent_tokenize(doc)\n total_num_sents.append(len(sents))\n temp_num_words = []\n for sent in sents:\n num_words = word_tokenize(sent)\n temp_num_words.append(len(num_words))\n total_num_words.append(temp_num_words)\n return np.array(total_num_sents), np.array(total_num_words)", "def sentencing(any_text, nlp):\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n doc = nlp(any_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n return sentences", "def loop_example():\n\n totals = []\n\n for row in poke_stats:\n totals.append(sum(row))\n \n return(totals)", "def summarize_ranked_sentences(ranked_sents, summary_len):\n summary = []\n for (sent, value) in ranked_sents:\n summary.append(sent)\n summary_len -= len(sent.split())\n if summary_len <= 5:\n # We stop at n-5 words already, as an effort to avoid being\n # really far off.\n break\n return summary", "def get_sentences(self, batch=None):\n\t\t\n\t\t# loop through the paragraph stream for this document database\n\t\tfor paragraph in self.get_paragraphs(batch):\n\t\t\t# loop through the sentences\n\t\t\tfor sentence in paragraph[\"sentences\"]:\n\t\t\t\t# yield the individual tokens\n\t\t\t\tyield sentence[\"tokens\"]", "def score_sentences(self, document, texts):\n sent_scores = []\n # call word_frequency to get a word frequency table (or rather list of words) from the respective article\n scorable_words = self.word_frequency(texts[self.sent_pos])\n # split the summaries by @highlight token\n summary_split = document.split(\"@ highlight\")\n sentenceValue = 0\n sent_len = 0\n # for each summary calculate the sentence value\n for summary in summary_split:\n words = nltk.word_tokenize(summary)\n sent_len = len(words)\n for word in words:\n if word in scorable_words:\n sentenceValue =+ 1\n # normalise sentence value based on sentence length so that longer sentences do not get an automatic advantage over shorter ones\n # as null rows havent been dropped yet there may be scores of 0\n if (sentenceValue !=0 and sent_len !=0):\n sentenceValue = sentenceValue / sent_len\n sent_scores.append((summary, sentenceValue))\n return sent_scores", "def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)", "def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)", "def summarize(text, sent_count=default_sents, kp_count=default_kp, idf=None, sg=True):\n summary = \"\"\n\n doc = nlp_pipeline(text)\n\n if sent_count > 0:\n summary = text_summary(doc, sent_count)\n\n top_phrases = []\n\n if kp_count > 0:\n if sg:\n top_phrases = sgrank(doc, kp_count, idf=idf)\n else:\n top_phrases = textrank(doc, kp_count)\n\n return (summary, top_phrases)", "def process_token_sentence(text):\n\n sentences = nltk.sent_tokenize(text)\n tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\n return sentences", "def batch_data(source, target, batch_size):\n for batch_i in range(0, len(source)//batch_size):\n start_i = batch_i * batch_size\n source_batch = source[start_i:start_i + batch_size]\n target_batch = target[start_i:start_i + batch_size]\n yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))", "def convert_to_text(batch, lengths, dico, params):\n batch = 
batch.cpu().numpy()\n lengths = lengths.cpu().numpy()\n\n slen, bs = batch.shape\n assert lengths.max() == slen and lengths.shape[0] == bs\n assert (batch[0] == params.eos_index).sum() == bs\n assert (batch == params.eos_index).sum() == 2 * bs\n sentences = []\n\n for j in range(bs):\n words = []\n for k in range(1, lengths[j]):\n if batch[k, j] == params.eos_index:\n break\n words.append(dico[batch[k, j]])\n sentences.append(\" \".join(words))\n return sentences", "def get_batches(int_text, batch_size, seq_length):\n n_batches = len(int_text) // (batch_size * seq_length)\n len_int_text = n_batches * (batch_size*seq_length)\n \n x = np.array(int_text[: len_int_text])\n y = np.hstack((np.array(int_text[1: len_int_text]) , np.array(int_text[0]))) #np.hstack()水平合并\n \n x_batches = np.split(x.reshape(batch_size, -1), n_batches, -1)\n y_batches = np.split(y.reshape(batch_size, -1), n_batches, -1)\n \n all_batches= np.array(list(zip(x_batches, y_batches)))\n return all_batches", "def __init__(self, n, sents, gamma=None, addone=True):\n assert n > 0\n self._n = n\n\n if gamma is not None:\n # everything is training data\n train_sents = sents\n else:\n # 90% training, 10% held-out\n m = int(0.45 * len(sents))\n l = int(0.65 * len(sents))\n train_sents = sents[:m] + sents[l:]\n held_out_sents = sents[m:l]\n\n print('Computing counts...')\n count = defaultdict(int)\n while (n >= 0):\n for sent in train_sents:\n s = sent[:] ## En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n # WORKed HERE!!\n # COMPUTE COUNTS FOR ALL K-GRAMS WITH K <= N\n\n # compute vocabulary size for add-one in the last step\n self._addone = addone\n if addone:\n print('Computing vocabulary...')\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc)\n\n # compute gamma if not given\n if gamma is not None:\n self._gamma = gamma\n else:\n print('Computing gamma...')\n self._gamma = gamma = 1\n p = self.log_prob(held_out_sents)\n new_gamma = 2\n streak = 1\n growing = True\n turns = 0\n while (turns < 15):\n self._gamma = new_gamma\n np = self.log_prob(held_out_sents)\n gamma = new_gamma\n if (np > p):\n if growing:\n streak += 1\n else:\n turns += 1\n streak = 0\n growing = True\n new_gamma = new_gamma + 2 ** streak\n else:\n if growing:\n turns += 1\n streak = 0\n growing = False\n else:\n streak += 1\n new_gamma = new_gamma - 2 ** streak\n p = np\n self._gamma = new_gamma\n print(self._gamma)", "def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter", "def merge_sentences(sentences):\n full_sentences = []\n for sentence in sentences:\n for arr_word in sentence:\n full_sentences.append(arr_word)\n return full_sentences", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def predict_sentences(self, sents):\n tkw=self.tkw\n sents_attr=[]\n sent_samples={\n \"word_inputs\":[],\n \"predicate_inputs\":[],\n \"postags_inputs\":[]\n }\n print('prepare data')\n for sid,sent in enumerate(sents):\n if sid % (int(np.ceil(len(sents)/100))) == 0:\n print(sid / len(sents))\n sent_str = \" \".join(sent)\n preds = [(word.i, str(word))\n for word\n in tkw.parser(sent_str)\n if 
word.tag_.startswith(\"V\")]\n num_of_samples = int(np.ceil(float(len(sent)) / self.sent_maxlen) * self.sent_maxlen)\n pred_list=[]\n for ind, pred in preds:\n cur_sample=self.encode_inputs([self.create_sample(sent, ind)])\n for name in [\"word_inputs\", \"predicate_inputs\", \"postags_inputs\"]:\n sent_samples[name].append(cur_sample[name])\n pred_list.append((ind, pred))\n sents_attr.append((num_of_samples,pred_list,len(sent)))\n for key in sent_samples:\n sent_samples[key]=np.concatenate(sent_samples[key],axis=0)\n print('predict data')\n X = sent_samples\n Y=self.model.predict(X)\n # print(Y[0])\n # print(Y[2])\n res=[]\n p=0\n for attr in sents_attr:\n num_of_samples,pred_list,sent_len=attr\n sample_len=num_of_samples//self.sent_maxlen\n ret=[]\n for pid,(ind, pred) in enumerate(pred_list):\n ret.append(((ind, pred),\n [(self.consolidate_label(label), float(prob))\n for (label, prob) in\n self.transform_output_probs(Y[p+pid*sample_len:p+(pid+1)*sample_len], \n get_prob = True).reshape(num_of_samples,\n 2)[:sent_len]]))\n res.append(ret)\n p+=len(pred_list)*sample_len\n return res", "def get_sentence_score(sentences, word_frequencies):\r\n sentence_scores = dict()\r\n for sent in sentences:\r\n word_count_without_stopwords=0\r\n for word in word_tokenize(sent.lower()):\r\n if word in word_frequencies.keys():\r\n word_count_without_stopwords+=1 \r\n if len(sent.split(' ')) < 30:\r\n if sent not in sentence_scores.keys():\r\n sentence_scores[sent] = word_frequencies[word]\r\n else:\r\n sentence_scores[sent] += word_frequencies[word]\r\n \r\n if sent in sentence_scores:\r\n sentence_scores[sent] = sentence_scores[sent]/word_count_without_stopwords\r\n \r\n print(sentence_scores) \r\n return sentence_scores", "def fit(self, documents):\n n_words_trained = 0\n tokens, self.vocab, data, self._frequencies, self.diction, self.reverse_diction = self._build_dataset(\n documents)\n n_tokens = len(tokens)\n n_vocab = len(self.vocab)\n words_per_epoch = n_vocab / self.n_epochs\n self._cum_dist = self._build_cum_dist()", "def fit(self):\n sentences = ''.join(self.__sentences) # concatenate all sentences\n chars = sorted(list(set(sentences))) # extract unique characters (unigrams)\n bigrams = sorted(list(set(self.ngrams(sentences, 2))))\n all_grams = chars + bigrams + ['unk'] # add unknown character\n\n self.__dictionary = dict((c, i) for i, c in enumerate(all_grams, start=1))\n self.__vocab_size = len(self.__dictionary)\n\n if self.__verbose:\n print('Vocab size:', self.__vocab_size)" ]
[ "0.62009716", "0.6035176", "0.60308325", "0.6027758", "0.58148813", "0.5691236", "0.567909", "0.5636329", "0.562765", "0.56133324", "0.5553161", "0.55471104", "0.55366445", "0.5535218", "0.5534292", "0.5531749", "0.5505629", "0.54945356", "0.5477114", "0.54692113", "0.5455944", "0.54458296", "0.5425127", "0.54187727", "0.5410996", "0.54065347", "0.53945845", "0.5387348", "0.5384576", "0.53568333", "0.53473395", "0.5346631", "0.53459466", "0.5340256", "0.5324538", "0.52963233", "0.5294783", "0.52858496", "0.52546775", "0.5239099", "0.52319044", "0.5225449", "0.52217567", "0.52195215", "0.5217791", "0.5214494", "0.52060163", "0.52019364", "0.519916", "0.5196079", "0.51956147", "0.5173655", "0.51658994", "0.5162064", "0.515137", "0.51510453", "0.5146803", "0.51375115", "0.5127094", "0.51230294", "0.512278", "0.51192415", "0.51178014", "0.51178014", "0.5115194", "0.5113799", "0.5111727", "0.51078534", "0.51060086", "0.5102112", "0.51018727", "0.5095402", "0.50945175", "0.50858355", "0.50832796", "0.5079058", "0.5074958", "0.50578624", "0.50549287", "0.5043389", "0.504256", "0.5037008", "0.5033246", "0.5027433", "0.50264406", "0.502548", "0.502548", "0.5023507", "0.5018743", "0.5012741", "0.5010946", "0.5010225", "0.5008927", "0.50040543", "0.5001882", "0.4998111", "0.4994764", "0.4990679", "0.49859333", "0.49848607" ]
0.6880938
0
Adds ``documents`` to the document inventory, writing to disk in batches of 500,000.
def add_documents(self, documents): # flag for StopIteration exceptions more_documents = True # loop while there are still documents in the iterator while more_documents: # increment batch number batch = len(self.batch_stats) + 1 # count sentences sentences_count = 0 # create temporary batch data file in the version directory batch_file = os.path.join(self.file_base.get_version_path(self.version), "data.jl.gz.temp") # try to read the next batch of files, catch exception and stop if there are no more try: # get next document before opening the file just to make sure it's there document = documents.next() # open the data file with gzip.open(batch_file, "wb") as outfile: # loop through DOCUMENT_BATCH_SIZE documents for i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE): # count sentences in document for paragraph in document["paragraphs"]: sentences_count += len(paragraph["sentences"]) # write JSON to file one line at a time outfile.write("%s\n" % json.dumps(document)) # if we are not done with this batch, retrieve the next document if i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1: document = documents.next() except StopIteration: # the end of the documents stream, set the flag to False more_documents = False # make sure the batch isn't empty if sentences_count > 0: # create the new batch in the file system self.version_batches.create_latest_version() # add the stats to the statistics hash self.batch_stats[batch] = BatchStats(sentences_count) # write the batch statistics to file with codecs.open(self._get_batch_stat_file(batch), "wb", "utf-8") as outfile: # write the JSON representation for the stats outfile.write(json.dumps(self.batch_stats[batch].to_json())) # move the temp data file to the correct location inside the version folder os.rename(batch_file, self._get_batch_file(batch))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_documents(self, documents: list):\n requests = [\n {'PutRequest': {'Item': Item}} \n for Item in documents\n ]\n ticks = [d['symbol'] for d in documents]\n size = getsizeof(requests)\n exceptions = self.dynamo_client.exceptions\n errors = (exceptions.ProvisionedThroughputExceededException)\n\n self.Logger.info(\n f'Writing batch of {ticks} into dynamodb '\n f'with size {size} bytes',\n extra={\"message_info\": {\"Type\": \"DynamoDB write\", \"Tickers\": ticks, \"Size\": size}}\n )\n \n try:\n response = self.dynamo_resource.batch_write_item(\n RequestItems={self.table_name: requests},\n ReturnConsumedCapacity = 'INDEXES')\n \n self.Logger.debug(f'{response}')\n \n if response['UnprocessedItems']:\n raise RuntimeError('UnprocessedItems in batch write')\n except errors as ex:\n raise app.AppException(ex, f'dynamodb throughput exceed')\n\n return True", "def upload(self, documents: List[Document], vectorise_func) -> None:\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n # Update document class conveniently\n if issubclass(type(documents[0]), ChunkedDocument):\n self._doc_class = ChunkedDocument\n\n for batch in batches:\n vectorise_func(batch, self)\n self.documents += batch", "def store_documents(self, documents: list):\n results = app.Results()\n entries = [\n { \n 'Id': str(uuid1()),\n 'MessageBody': json.dumps(doc)\n }\n for doc in documents\n ]\n ids = [ e['Id'] for e in entries ]\n self.Logger.info(f'Store {ids} in sqs')\n self.Logger.debug(f'Saving {entries} in sqs {self.sqs_queue_url}')\n self.sqs_client.send_message_batch(\n QueueUrl=self.sqs_queue_url,\n Entries=entries\n )\n results.ActionStatus = 0\n results.Results = ids\n return results", "def insert_documents(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n connection.execute(\"BEGIN TRANSACTION\")\n for doc in chunk:\n # python doesn't support prepared statements, but instead has a builtin sql cache\n connection.execute(\n \"INSERT INTO docs(did, title, url) VALUES (?, ?, ?)\", doc.convert_to_tuple())\n current += 1\n print(f\"\\r[{current}/{max_}] doc done\", end='')\n connection.execute(\"COMMIT\")", "def add_documents(self, docs):\n for doc in docs:\n assert isinstance(doc, pylastica.document.Document), \"All items in list docs must be of type Document: %r\" % doc\n doc.doc_type = self.name\n return self.index.add_documents(docs)", "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "async def put_documents(self, collection, documents):\n await self.ensure_collection(collection)\n try:\n if SOLR_COMMIT_WITHIN:\n params = {'commitWithin': SOLR_COMMIT_WITHIN}\n else:\n params = {'commit': 'true'}\n await self.post(\n '/v2/collections/{}/update'.format(collection),\n params=params, json_data=documents\n )\n logger.info('Successfully indexed {} documents to collection {}'\n .format(len(documents), collection))\n except SolrError:\n logger.warning('Failed to put {} documents to collection {}'\n .format(len(documents), collection))\n raise", "def insert_many(self, documents: Iterable[dict]) -> None:\n for i, document in enumerate(documents):\n if isinstance(document, dict):\n 
self._store_document(document)\n else:\n raise TypeError(\n f\"The document at index {i} was not a dictionary. All documents must be dictionaries.\"\n )\n self._dump()", "def documents(self, documents):\n\n self._documents = documents", "def createDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # this is create method, no update allowed\n if \"_rev\" in document: del document[\"_rev\"]\n if \"_deleted\" in document: del document[\"_deleted\"]\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "async def bulk_insert(self, documents, alias=None):\n\n is_valid = True\n docs_to_insert = []\n\n for document_index, document in enumerate(documents):\n self.update_field_on_save_values(document, document._id is not None)\n try:\n is_valid = is_valid and self.validate_document(document)\n except Exception:\n err = sys.exc_info()[1]\n raise ValueError(\n \"Validation for document %d in the documents you are saving failed with: %s\"\n % (document_index, str(err))\n )\n\n if not is_valid:\n return\n\n docs_to_insert.append(document.to_son())\n\n if not is_valid:\n return\n\n doc_ids = await self.coll(alias).insert(docs_to_insert)\n\n for object_index, object_id in enumerate(doc_ids):\n documents[object_index]._id = object_id\n\n return documents", "def add(self, documents):\n\n if self.cluster:\n self.cluster.add(documents)\n else:\n super().add(documents)\n\n return documents", "def finish_documents():\n\n doc_ids = json.loads(request.form['doc_ids'])\n\n for docid in doc_ids:\n\n document = Document.query.filter_by(id=docid).first_or_404()\n\n document.status = \"OK\"\n\n db.session.add(document)\n\n db.session.commit()", "def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def add_documents(self, docs):\n if 'sentences' in docs:\n for sent in docs.sentences:\n sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword])\n self._token_count.update(sent)\n\n else:\n sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword]))\n self._token_count.update(sent)", "def save(self, batch_of_documents, destination=\"exports\", *args, **kwargs):\n raise NotImplementedError", "def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))", "def add_documents(\n self,\n index: str,\n documents: List[Dict[str, Any]],\n routing: Callable[[Dict[str, Any]], str] = None,\n 
doc_id: Callable[[Dict[str, Any]], str] = None,\n ) -> int:\n\n def map_doc_2_action(doc: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Configures bulk action\"\"\"\n data = {\n \"_op_type\": \"index\",\n \"_index\": index,\n \"_routing\": routing(doc) if routing else None,\n **doc,\n }\n\n _id = doc_id(doc) if doc_id else None\n if _id is not None:\n data[\"_id\"] = _id\n\n return data\n\n success, failed = es_bulk(\n self.__client__,\n index=index,\n actions=map(map_doc_2_action, documents),\n raise_on_error=True,\n refresh=\"wait_for\",\n )\n return len(failed)", "async def index_documents(self, app_id, namespace, index_name, documents):\n collection = get_collection_name(app_id, namespace, index_name)\n solr_documents = [_to_solr_document(doc) for doc in documents]\n await self.solr.put_documents(collection, solr_documents)", "def parallel_import_documents(self, index, documents, **kwargs):\n \n # Set default values in passed as kwargs\n chunk_size = kwargs.get('chunk_size', None)\n if chunk_size is None:\n chunk_size = 20000\n kwargs['chunk_size'] = chunk_size\n \n request_timeout = kwargs.get('request_timeout', None)\n if request_timeout is None:\n request_timeout = 3600\n kwargs['request_timeout'] = request_timeout\n \n doc_type = kwargs.get('doc_type', None)\n if doc_type is None:\n doc_type = \"_doc\"\n kwargs['doc_type'] = doc_type\n \n raise_on_exception = kwargs.get('raise_on_exception', None)\n if raise_on_exception is None:\n raise_on_exception = False\n kwargs['raise_on_exception'] = raise_on_exception\n \n raise_on_error = kwargs.get('raise_on_error', None)\n if raise_on_error is None:\n raise_on_error = False\n kwargs['raise_on_error'] = raise_on_error\n \n self._logger.info('%s documents to index into %s', len(documents), index)\n doc_count = 0 \n \n if len(documents) > 0:\n for success, info in helpers.parallel_bulk(self.es, documents, index=index, **kwargs):\n if not success:\n self._logger.error(f'A document failed: {info}')\n else:\n doc_count += 1\n \n self._logger.info('%s documents indexed into %s', doc_count, index)\n \n return doc_count", "def insert(self, index, documents, batch_size=100):\n actions = []\n latest_index_id, begin_timestamp = self.__get_latest_index(index)\n\n for idx, doc in enumerate(documents):\n index_id = latest_index_id\n\n if doc[\"request_time\"] <= begin_timestamp:\n index_id = self.get_query_index(index, doc[\"request_time\"])\n\n action = {\n \"_index\": index + \"_\" + str(index_id),\n \"_type\": \"docs\",\n \"_source\": doc,\n }\n actions.append(action)\n\n if len(actions) == batch_size or idx == len(documents) - 1:\n print(\"Bulk ingesting started...\")\n\n try:\n bulk(self.client, actions, raise_on_error=True, request_timeout=200)\n except:\n print(\"Could not write the data.\")\n raise\n \n actions.clear()\n print(\"Bulk ingesting done\")\n if self.__get_index_size(index, latest_index_id) >= self.THRESHOLD:\n begin_timestamp = self.__update_index_timerange(\n index, latest_index_id\n )\n latest_index_id = self.__create_new_index(\n index, latest_index_id + 1, begin_timestamp\n )", "def ingest_all(self, docs):\n for doc in docs:\n self.ingest(doc)", "def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n del pathData['updateMask']\n data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)", "def batch(self, requests):\n return 
AlgoliaUtils_request(self.headers, self.write_hosts, \"POST\", \"/1/indexes/*/batch\", self.timeout, {\"requests\": requests})", "def upload(self, documents: List[ElasticDocument], vectorise_func, index: str = None) -> None:\n if not index:\n index = self._index\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n for batch in batches:\n payload = []\n # Calculate vectors\n vectorise_func(batch, self)\n\n for document in batch:\n # JSON representation of document\n doc_json = document.to_elastic()\n\n # Add correct index\n doc_json[\"_index\"] = index\n\n # Rename id key\n doc_json[\"_id\"] = doc_json[\"id\"]\n del doc_json[\"id\"]\n\n payload.append(doc_json)\n\n # Bulk upload to elasticsearch\n helpers.bulk(self._client, payload)\n\n # Update index\n self._client.indices.refresh(index=self._index)", "def add(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n try:\n psycopg2.extras.execute_batch(\n cursor,\n f'INSERT INTO {self.table} (ID, DOC) VALUES (%s, %s)',\n [\n (\n doc.id,\n doc.SerializeToString(),\n )\n for doc in docs\n ],\n )\n except psycopg2.errors.UniqueViolation as e:\n self.logger.warning(\n f'Document already exists in PSQL database. {e}. Skipping entire transaction...'\n )\n self.connection.rollback()\n self.connection.commit()", "def insert_boost(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n\n connection.execute(\"BEGIN TRANSACTION\")\n for doc in chunk:\n connection.execute(\n \"INSERT INTO boost(did, date, page) VALUES (?, ?, ?)\", (doc.id, doc.date, doc.page))\n connection.execute(\"COMMIT\")\n current += len(chunk)\n print(f\"\\r[{current}/{max_}] boost done\", end='')\n print()", "def add_document_lists(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)", "def batch_add_documents(rows: tuple, client: firestore.Client, context: Context = None) -> bool:\n if len(rows) < 1:\n return\n logger.debug('Beginning batch add.')\n\n batch = client.batch()\n col = client.collection(config.FIRESTORE_IDENTITY_POOL)\n batch_size = 0\n\n for row in rows:\n doc_id = row.pop('mpi')\n doc_ref = col.document(doc_id)\n batch.set(doc_ref, row)\n batch_size += 1\n \n try:\n batch.commit()\n logger.info(f\"Committed batch of {batch_size} records\")\n return True\n except Exception as e:\n logger.error(e)\n return False", "def build_DB(self, doc_files):\n\t\tcompteur=0\n\t\tdoc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\twhile os.path.exists(doc_name):\n\t\t doc=Doc(doc_name)\n\t\t self.DB.add_doc(doc)\n\t\t compteur+=1\n\t\t doc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\t#print self.DB.id2nbword\n\t\tself.dump_DB()", "def import_documents(self, index, documents, **kwargs):\n self._logger.info('%s documents to index into %s', len(documents), index)\n 
response = None\n if 'pipeline' in kwargs:\n pipeline_name = kwargs.get(\"pipeline\")\n response = helpers.bulk(self.es, documents, index=index, doc_type=self.doc_type, pipeline=pipeline_name)\n else:\n response = helpers.bulk(self.es, documents, index=index, doc_type=self.doc_type)\n\n # It returns a tuple with summary information - \n # number of successfully executed actions and either list of errors or number of errors if stats_only is set to True.\n return response", "def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")", "def upload_requests_to_couch(requests):\n couchdb_host = 'open311.couchone.com'\n couchdb_path = '/service-requests/_bulk_docs'\n docs = {'docs':requests}\n \n couchdb_conn = httplib.HTTPConnection(couchdb_host)\n upload_request = couchdb_conn.request(\n 'POST', couchdb_path, json.dumps(docs),\n { 'Content-type' : 'application/json' })\n \n upload_response = couchdb_conn.getresponse()\n return upload_response.read()", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def index_document(self, document):\n # Recursively collect records\n records = []\n if document.get_type() is document.TYPE_DIR:\n dirname = document.get_filename()\n subdirs, files = document.get_contents()\n for subdir in subdirs:\n document.set_filename(os.path.join(dirname, subdir))\n self.index_document(document)\n for filename in files:\n document.set_filename(os.path.join(dirname, filename))\n record = self.create_record(document)\n if record is not None:\n records.append(record)\n\n if len(records) == 0:\n return\n\n # Store records\n writer = self.get_index().writer()\n for record in records:\n writer.add_document(**record)\n writer.commit()", "def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")", "def load(self, documents, uniquify=False):\n assert documents, \"missing list of documents, text single doc per line\"\n assert isinstance(documents, list), \"documents must be list\"\n assert isinstance(documents[0], list), \"each document is also a list\"\n #--------------------------------------------------------------------------------------------\n\n def _get_new_counts(document):\n return Counter(document) if not uniquify else Counter(list(set(document)))\n\n for idx, document in enumerate(documents):\n new_counter = _get_new_counts(document)\n self.counter.update(new_counter)\n if idx % 1000 == 0:\n print(\"load: {}\\r\".format(idx), end='')\n return self", "def 
linear(files):\n return list(map(insert_to_mongo, files))", "def index_documents(self, engine_name, documents):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(documents)\n\n return self.swiftype_session.request('post', endpoint, data=data)", "def save_docs(self, docs, use_uuids=True, new_edits=None, **params):\n\n if not isinstance(docs, (list, tuple)):\n docs = tuple(docs)\n docs1 = []\n docs_schema = []\n for doc in docs:\n doc1, schema = _maybe_serialize(doc)\n docs1.append(doc1)\n docs_schema.append(schema)\n\n def is_id(doc):\n return '_id' in doc\n\n if use_uuids:\n noids = []\n for k, g in groupby(docs1, is_id):\n if not k:\n noids = list(g)\n\n uuid_count = max(len(noids), self.server.uuid_batch_count)\n for doc in noids:\n nextid = self.server.next_uuid(count=uuid_count)\n if nextid:\n doc['_id'] = nextid\n\n payload = {\"docs\": docs1}\n if new_edits is not None:\n payload[\"new_edits\"] = new_edits\n\n # update docs\n res = self._request_session.post(\n self._database_path('_bulk_docs'), data=json.dumps(payload),\n headers={\"Content-Type\": \"application/json\"}, **params)\n res.raise_for_status()\n results = res.json()\n\n errors = []\n for i, res in enumerate(results):\n if 'error' in res:\n errors.append(res)\n logging_context = dict(\n method='save_docs',\n params=params,\n error=res['error'],\n )\n error_logger.error(\"save_docs error\", extra=logging_context)\n else:\n if docs_schema[i]:\n docs[i]._doc.update({\n '_id': res['id'],\n '_rev': res['rev']\n })\n else:\n docs[i].update({\n '_id': res['id'],\n '_rev': res['rev']\n })\n if errors:\n raise BulkSaveError(errors, results)\n return results", "def queue_all_texts(self, q, texts, window_size):\n for batch_num, batch in enumerate(self.yield_batches(texts)):\n q.put(batch, block=True)\n before = self._num_docs / self.log_every\n self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)\n if before < (self._num_docs / self.log_every):\n logger.info(\n \"%d batches submitted to accumulate stats from %d documents (%d virtual)\",\n (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)", "def train(self, documents, total_examples=None, total_words=None,\n epochs=None, start_alpha=None, end_alpha=None,\n word_count=0, queue_factor=2, report_delay=1.0, callbacks=()):\n super(Doc2Vec, self).train(\n documents, total_examples=total_examples, total_words=total_words,\n epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,\n queue_factor=queue_factor, report_delay=report_delay, callbacks=callbacks)", "def insert_tfs(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n rows = (d.get_tfs_rows() for d in chunk)\n connection.execute(\"BEGIN TRANSACTION\")\n for row in rows:\n connection.executemany(\n \"INSERT INTO tfs(did, term, tf) VALUES (?, ?, ?)\", row)\n connection.execute(\"COMMIT\")\n current += len(chunk)\n print(f\"\\r[{current}/{max_}] doc-tfs done\", end='')\n print()", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount", "def 
bulkupload(self, string, bibo):\n if not self.filemode:\n self.bulknum += 1\n self.esdocs.append(self.rdf2es(string, bibo))\n\n if self.filemode:\n # Output content to file\n #I think we shouldn't serialize the content in memory in the output-file mode\n\n for outer in self.esdocs:\n for inner in outer:\n #self.of.write(dumps(inner, separators='\\n'))\n #we need this json dump method because the content is stored in a dictionary structure - as far as I understand it\n #so we can't just write a string\n dump(inner, self.of)\n #dump(bytes(inner,'UTF-8'), self.of)\n self.writtenDocuments += 1\n\n self.of.write('\\n')\n #perhaps flush it only in bigger chunks? - later\n #self.of.flush()\n del self.esdocs[:]\n if self.writtenDocuments >= self.bulksize:\n self._closeFile()\n self.writtenDocuments = 0\n self._openFile()\n\n elif self.bulknum >= self.bulksize:\n # Perform bulk upload\n helpers.bulk(client=self.of, actions=self.esdocs, stats_only=True)\n # Reset counter and empty list\n self.bulknum = 0\n del self.esdocs[:]", "def index(self, files):\n docs = []\n global N_CHARS\n\n # Reset the database to measure complete indexing time\n solr.delete(q='*:*', commit=True)\n\n for elem in files:\n with open(elem, 'r') as f:\n file_text = f.read()\n doc_id = int(elem.split('.')[0].split('/')[-1])\n self.docs.append({\n \"id\": doc_id,\n \"text\": file_text[:N_CHARS]\n })\n resp = self.solr.add(self.docs, commit=True)\n self.n_rows += len(files)\n return json.loads(resp)['responseHeader']['QTime'] / 1000", "def update_documents(self, engine_name, documents):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(documents)\n\n return self.swiftype_session.request('patch', endpoint, data=data)", "def test_batch_upload(\n large_upload_collection: UploadCollection,\n fake_session: HexpySession,\n caplog: CaptureFixture,\n) -> None:\n responses.add(\n responses.POST, HexpySession.ROOT + \"content/upload\", json={}, status=200\n )\n\n client = ContentUploadAPI(fake_session)\n\n with caplog.at_level(logging.INFO):\n response = client.upload(\n document_type=123456789, items=large_upload_collection, request_usage=True\n )\n\n assert (\n caplog.records[0].msg\n == \"More than 1000 items found. 
Uploading in batches of 1000.\"\n )\n\n assert response == {\"Batch 0\": {}, \"Batch 1\": {}, \"Batch 2\": {}, \"Batch 3\": {}}", "def test_add_documents(empty_index, small_movies):\n index = empty_index()\n response = index.add_documents(small_movies)\n assert isinstance(response, TaskInfo)\n assert response.task_uid is not None\n update = index.wait_for_task(response.task_uid)\n assert index.get_primary_key() == \"id\"\n assert update.status == \"succeeded\"", "def add(self, batch_size=10000):\n if self.N <= batch_size:\n self.index.add(self.database)\n else:\n [self.index.add(self.database[i:i + batch_size])\n for i in tqdm(range(0, len(self.database), batch_size),\n desc='[index] add')]", "def ingest(self, files):\n for file in files:\n self.files.add(file)", "def iter_documents(self):\n raise NotImplementedError", "def process_all_documents(self,\n n_jobs: Optional[int] = None,\n ) -> List[Optional[Document]]:\n return self.process_documents(self.doc_ids, n_jobs)", "def _(event):\n\n N = len(self.view_model.results)\n coll = self.shared_state[\"active_collection\"]\n self.view_model.status_textcontrol.text = (\n f\"adding {N} records to {coll.name}...\"\n )\n count = 0\n for record in self.view_model.results:\n try:\n coll.add_document(record_id=record[\"record_id\"])\n count += 1\n except Exception:\n pass\n self.view_model.status_textcontrol.text = (\n f\"added {count} records to {coll.name}.\"\n )", "def build(self,documents):\n\t\tself.vectorKeywordIndex = self.getVectorKeywordIndex(documents)\n\n\t\tself.documentVectors = [self.createVector(document) for document in documents]", "def add(self, keys: List[int], docs: List['gnes_pb2.Document'], *args, **kwargs):\n pass", "def test_bulk_index_iterates_docs_only_once(self):\n doc = self._make_doc()\n docs = OneshotIterable([doc])\n self.adapter.bulk_index(docs) # does not raise IterableExhaustedError", "def fit(self, documents):\n n_words_trained = 0\n tokens, self.vocab, data, self._frequencies, self.diction, self.reverse_diction = self._build_dataset(\n documents)\n n_tokens = len(tokens)\n n_vocab = len(self.vocab)\n words_per_epoch = n_vocab / self.n_epochs\n self._cum_dist = self._build_cum_dist()", "async def add_document(\n self,\n doc_id,\n nosave=False,\n score=1.0,\n payload=None,\n replace=False,\n partial=False,\n no_create=False,\n **fields,\n ):\n self.client._add_document(\n doc_id,\n conn=self._pipeline,\n nosave=nosave,\n score=score,\n payload=payload,\n replace=replace,\n partial=partial,\n no_create=no_create,\n **fields,\n )\n self.current_chunk += 1\n self.total += 1\n if self.current_chunk >= self.chunk_size:\n await self.commit()", "def add_all(cls, documents: List[dict]) -> List[dict]:\n if not documents:\n raise ValidationFailed([], message=\"No data provided.\")\n\n if not isinstance(documents, list):\n raise ValidationFailed(documents, message=\"Must be a list of dictionaries.\")\n\n new_documents = copy.deepcopy(documents)\n\n errors = cls.validate_and_deserialize_insert(new_documents)\n if errors:\n raise ValidationFailed(documents, errors)\n\n try:\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Inserting {new_documents}...\")\n cls._insert_many(new_documents)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(\"Documents inserted.\")\n return [cls.serialize(document) for document in new_documents]\n except pymongo.errors.BulkWriteError as e:\n raise ValidationFailed(documents, message=str(e.details))", "def export_documents(self, index, filename, **kwargs):\n 
documentsGenerator = self.get_documents(index, **kwargs)\n documents = []\n format=kwargs.get('format','json')\n for doc in documentsGenerator:\n doc_with_id={**doc.to_dict(),'_id':doc.meta.id}\n documents.append(doc_with_id)\n self.__export_documents(documents,filename,exportformat=format)", "def run(\n self,\n query=\"*\",\n destination=\"exports/\",\n overwrite=False,\n batchsize=None,\n *args,\n **kwargs\n ):\n if not batchsize:\n batchsize = self.batchsize\n for docbatch in self._process_by_batch(\n self._retrieve(query), batchsize=batchsize\n ):\n self.save(docbatch, destination=destination, *args, **kwargs)\n if self.fileobj:\n self.fileobj.close()", "def store_requests(self, requests):\n coll = self._db.get_collection(COLLECTION_REQUEST)\n result = coll.insert_many(requests)\n return result.inserted_ids", "def batch_write(client, resources, batch_size=MAX_DYNAMO_BATCH_SIZE, batch_counter_step=MAX_DYNAMO_BATCH_SIZE):\n idx = 0\n item_count = 0\n\n batch = defaultdict(list)\n for idx, batch_resources in enumerate(chunk(resources, batch_size)):\n batch.clear()\n for resource in batch_resources:\n batch[getmeta(resource).table_name(client)].append(\n {'PutRequest': {'Item': resource.to_dynamo_dict(skip_null_fields=True)}}\n )\n item_count += 1\n\n if (idx % batch_counter_step) == 0:\n logger.info(\"Loading batch: %s\", idx)\n\n client.batch_write_item(RequestItems=batch)\n\n logger.info(\"Loaded %s records in %s batches.\", item_count, idx + 1)", "def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []", "def processjob(self, job):\n self.model.add_documents(job)\n self.jobsdone += 1\n if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:\n fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')\n self.model.save(fname)", "def backupDocuments(currentTime,baseDir):\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n pathToFolder = baseDir +'Results/'\n FEATURES_DIR = pathToFolder + '/Features/' + currentTime\n docToFeatureVector = parseFeatures(FEATURES_DIR)\n documents = db.documents.find({})\n for document in documents:\n document['text']= document.pop('current_document')\n document['id']= document.pop('_id')\n document['features'] = docToFeatureVector[document[\"query_id\"]+\"-\"+document[\"username\"]]\n del document['posted_document']\n document['iteration'] = currentTime\n db.archive.save(document)", "def collecte_docs(self, chercheur, overwrite=False): # self,\n init = overwrite # If True, data persistence is lost when references are updated\n docs = hal.find_publications(chercheur[\"halId_s\"], \"authIdHal_s\")\n\n progress_recorder = ProgressRecorder(self)\n progress_recorder.set_progress(0, len(docs), description=\"récupération des données HAL\")\n # Insert documents collection\n for num, doc in enumerate(docs):\n doc[\"country_colaboration\"] = location_docs.generate_countrys_fields(doc)\n doc = doi_enrichissement.docs_enrichissement_doi(doc)\n if \"fr_abstract_s\" in doc.keys():\n if isinstance(doc[\"fr_abstract_s\"], list):\n doc[\"fr_abstract_s\"] = \"/n\".join(doc[\"fr_abstract_s\"])\n if len(doc[\"fr_abstract_s\"]) > 100:\n doc[\"fr_entites\"] = keyword_enrichissement.return_entities(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n doc[\"fr_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n if \"en_abstract_s\" in doc.keys():\n if isinstance(doc[\"en_abstract_s\"], list):\n doc[\"en_abstract_s\"] = 
\"/n\".join(doc[\"en_abstract_s\"])\n if len(doc[\"en_abstract_s\"]) > 100:\n doc[\"en_entites\"] = keyword_enrichissement.return_entities(\n doc[\"en_abstract_s\"], \"en\"\n )\n doc[\"en_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"en_abstract_s\"], \"en\"\n )\n\n doc[\"_id\"] = doc[\"docid\"]\n doc[\"validated\"] = True\n\n doc[\"harvested_from\"] = \"researcher\"\n\n doc[\"harvested_from_ids\"] = []\n doc[\"harvested_from_label\"] = []\n\n #\n #\n # print(doc[\"authorship\"], doc ['authLastName_s'])\n\n if len(doc[\"authIdHal_s\"]) != len(doc[\"authLastName_s\"]):\n # print (\"elastichal.py : test d'autorat no good\")\n # test sur le nom complet...\n nom = [\n truc\n for truc in doc[\"authLastName_s\"]\n if chercheur[\"lastName\"].lower() in truc.lower()\n ] # pour les récemment mariés qui auraient un nom composé...\n # Après si 'lun des co-auteur porte le même nom...\n if len(nom) > 0:\n nom = nom[0].title()\n try:\n if doc[\"authLastName_s\"].index(nom) == 0: # premier\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authLastName_s\"].index(nom) == len(doc[\"authLastName_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n except ValueError:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n elif chercheur[\"halId_s\"] in doc[\"authIdHal_s\"]:\n if doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == 0:\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == len(doc[\"authIdHal_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n else:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n\n doc[\"harvested_from_ids\"].append(chercheur[\"halId_s\"])\n\n # historique d'appartenance du docId\n # pour attribuer les bons docs aux chercheurs\n # harvet_history.append({'docid': doc['docid'], 'from': row['halId_s']})\n #\n # for h in harvet_history:\n # if h['docid'] == doc['docid']:\n # if h['from'] not in doc[\"harvested_from_ids\"]:\n # doc[\"harvested_from_ids\"].append(h['from'])\n\n doc[\"records\"] = []\n\n doc[\"MDS\"] = utils.calculate_mds(doc)\n\n try:\n should_be_open = utils.should_be_open(doc)\n if should_be_open == 1:\n doc[\"should_be_open\"] = True\n if should_be_open == -1:\n doc[\"should_be_open\"] = False\n\n if should_be_open == 1 or should_be_open == 2:\n doc[\"isOaExtra\"] = True\n elif should_be_open == -1:\n doc[\"isOaExtra\"] = False\n except IndexError:\n print(\"publicationDate_tdate error ?\")\n doc[\"Created\"] = datetime.datetime.now().isoformat()\n\n if not init: # récupération de l'existant pour ne pas écraser\n field = \"_id\"\n doc_param = esActions.scope_p(field, doc[\"_id\"])\n\n if not es.indices.exists(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\"\n ): # -researchers\" + row[\"ldapId\"] + \"-documents\n print(\"exception \", chercheur[\"labHalId\"], chercheur[\"ldapId\"])\n\n res = es.search(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n body=doc_param,\n ) # -researchers\" + row[\"ldapId\"] + \"-documents\n\n if len(res[\"hits\"][\"hits\"]) > 0:\n doc[\"validated\"] = 
res[\"hits\"][\"hits\"][0][\"_source\"][\"validated\"]\n if \"authorship\" in res[\"hits\"][\"hits\"][0][\"_source\"]:\n doc[\"authorship\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"authorship\"]\n\n if (\n res[\"hits\"][\"hits\"][0][\"_source\"][\"modifiedDate_tdate\"]\n != doc[\"modifiedDate_tdate\"]\n ):\n doc[\"records\"].append(\n {\n \"beforeModifiedDate_tdate\": doc[\"modifiedDate_tdate\"],\n \"MDS\": res[\"hits\"][\"hits\"][0][\"_source\"][\"MDS\"],\n }\n )\n\n else:\n doc[\"validated\"] = True\n progress_recorder.set_progress(num, len(docs), description=\"(récolte)\")\n progress_recorder.set_progress(num, len(docs), description=\"(indexation)\")\n helpers.bulk(\n es,\n docs,\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n refresh=\"wait_for\",\n )\n\n return chercheur # au cas où", "def index_bulk_from_files(self, files):\r\n\r\n docs = self._mailextractor.extract_jsons(files) # Generator-Iterable\r\n actions = self.convert_docstrs_to_bulk_actions(docs) # Generator-Iterable\r\n\r\n self._cur_print = 0\r\n actions_for_chunk = self.print_chunk_progress(actions) # Generator-Iterable\r\n (cnt_success, errors_index) = es_helpers.bulk(\r\n self._es, actions_for_chunk, chunk_size=constants.ES_BULK_CHUNK_SIZE)\r\n\r\n cnt_total = self._mailextractor.cnt_total\r\n errors_convert = self._mailextractor.errors_convert\r\n cnt_error = len(errors_convert) + len(errors_index)\r\n return Summary(cnt_total=cnt_total, cnt_success=cnt_success, cnt_error=cnt_error,\r\n errors_convert=errors_convert, errors_index=errors_index)", "def consume_data(self, data):\n # Get parameters\n logger_manager = data['logger_manager']\n doc_m = data['document_manager']\n message_id = data['message_id']\n documents = data['documents']\n to_remove_queue = data['to_remove_queue']\n duplicates = no_requestInTs = 0\n hash_set = set()\n\n for current_document in documents:\n\n # Mark to removal documents without requestInTs immediately (as of bug in xRoad software ver 6.22.0)\n if current_document['requestInTs'] is None and current_document['securityServerType'] is None:\n to_remove_queue.put(current_document['_id'])\n no_requestInTs += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('no_requestInTs',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Check if is batch duplicated\n current_document_hash = doc_m.calculate_hash(current_document)\n if current_document_hash in hash_set:\n # If yes, mark to removal\n to_remove_queue.put(current_document['_id'])\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('batch_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Check if is database duplicated\n if self.db_m.check_if_hash_exists(current_document_hash):\n # If here, add to batch duplicate cache\n hash_set.add(current_document_hash)\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('database_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Mark hash as seen\n hash_set.add(current_document_hash)\n # Find possible matching documents\n matching_documents = 
self.db_m.find_by_message_id(current_document)\n # Try to match the current document with possible pairs (regular)\n merged_document = doc_m.find_match(current_document, matching_documents)\n matching_type = ''\n\n if merged_document is None:\n # Try to match the current document with orphan-matching\n merged_document = doc_m.find_orphan_match(current_document, matching_documents)\n if merged_document is not None:\n matching_type = 'orphan_pair'\n else:\n matching_type = 'regular_pair'\n\n if merged_document is None:\n matching_type = 'orphan'\n if current_document['securityServerType'] == 'Producer':\n new_document = doc_m.create_json(None, current_document, None, current_document_hash, message_id)\n else:\n if current_document['securityServerType'] != 'Client':\n current_document['securityServerType'] = 'Client'\n new_document = doc_m.create_json(current_document, None, current_document_hash, None, message_id)\n\n new_document = doc_m.apply_calculations(new_document)\n new_document['correctorTime'] = database_manager.get_timestamp()\n new_document['correctorStatus'] = 'processing'\n new_document['matchingType'] = matching_type\n\n # Mark non-xRoad queries as 'done' instantly. No reason to wait matching pair\n if 'client' in new_document and new_document['client'] is not None and 'clientXRoadInstance' in new_document['client'] \\\n and new_document['client']['clientXRoadInstance'] is None:\n new_document['correctorStatus'] = 'done'\n new_document['matchingType'] = 'orphan'\n\n self.db_m.add_to_clean_data(new_document)\n\n else:\n\n if current_document['securityServerType'] == 'Client':\n\n if merged_document['client'] is None:\n merged_document['client'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['clientHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching clients for 1 producer: {1}'.format(self.worker_name, current_document)\n logger_manager.log_warning('corrector_merging', msg)\n\n else:\n\n if merged_document['producer'] is None:\n merged_document['producer'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['producerHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching producers for 1 client: {1}'.format(self.worker_name, current_document)\n logger_manager.log_error('corrector_merging', msg)\n\n self.db_m.mark_as_corrected(current_document)\n\n if no_requestInTs:\n msg = '[{0}] {1} document(s) without requestInTs present'.format(self.worker_name, no_requestInTs)\n logger_manager.log_warning('corrector_no_requestInTs', msg)\n\n return duplicates", "def add_document(\n self,\n doc_id,\n nosave=False,\n score=1.0,\n payload=None,\n replace=False,\n partial=False,\n no_create=False,\n **fields,\n ):\n self.client._add_document(\n doc_id,\n conn=self._pipeline,\n nosave=nosave,\n score=score,\n payload=payload,\n replace=replace,\n partial=partial,\n no_create=no_create,\n **fields,\n )\n self.current_chunk += 1\n self.total += 1\n if 
self.current_chunk >= self.chunk_size:\n self.commit()", "def getAndSaveDocuments(base_url, delay=None):\n\n\tsampled_documents_filepath = getScriptDirectory() + \"/result/sampled_documents.csv\"\n\n\t# Check if sample exists\n\tif not os.path.isfile(sampled_documents_filepath):\n\t\tprint \"getAndSaveDocuments() was called but sampled_documents.csv doesn't exist.\"\n\n\t\treturn\n\n\t# Get and save each document in the sample\n\tdocuments = pandas.read_csv(sampled_documents_filepath)\n\tnodes = getNodeList(base_url)\n\tformats = getFormatList(base_url)\n\n\tprint(\"Total sampled documents to save: %d\" % documents.shape[0])\n\n\tfor i in range(0, documents.shape[0]):\n\t\tprint \"[%d of %d]\" % (i + 1, documents.shape[0])\n\n\t\tnode_identifier = documents['authoritativeMN'][i]\n\n\t\t# Get the meta and object XML\n\t\tdocument_identifier = documents['identifier'][i]\n\t\tmeta_xml = getIdentifierMetaXML(base_url, document_identifier)\n\n\n\t\t# Determine if the node identifier is in the Node list.\n\t\t# If not, it is an invalid node id, and should be replaced with\n\t\t# the authoritativeMN from the system metadata\n\n\t\tvalid_node = True\n\n\t\tif (node_identifier not in nodes):\n\t\t\tvalid_node = False\n\n\t\t\tif meta_xml is not None:\n\t\t\t\tnode_id_element = meta_xml.find(\"./authoritativeMN\")\n\n\t\t\t\tif node_id_element is not None:\n\t\t\t\t\tnode_identifier = node_id_element.text\n\n\t\t# Remove \"urn:node:\" from node_identifier\n\t\t#\n\t\t# This remove redundant text from the folder names\n\t\t# but also deals with how Mac OS handles colons in file paths.\n\t\t# Mac OS considers colons (:) to separate folders in a file\n\t\t# hierarchy so ./result/urn:node:foo will be shown in Cocoa apps as\n\t\t# ./result/urn/node/foo where urn/node/foo is the folder name.\n\t\t# This is confusing because the folder appears with colons when viewed\n\t\t# from the terminal. This fixes removes the ambiguity between the terminal\n\t\t# and Cocoa applications.\n\n\t\tnode_short_identifier = node_identifier.split(\":\")\n\t\tnode_short_identifier = node_short_identifier[len(node_short_identifier) - 1]\n\n\t\t# Make the subdirectories to store files\n\t\tsubdirectory_path = getScriptDirectory() + \"/result/\" + node_short_identifier\n\n\t\t# Don't get metadata again if directory exists for identifier\n\t\tif not os.path.exists(subdirectory_path):\n\t\t\tos.makedirs(subdirectory_path)\n\n\t\tif delay is not None:\n\t\t\ttime.sleep(delay)\n\n\t\t# Extract the formatId from the sysmeta\n\t\tformat_path = None\n\n\t\tif meta_xml is not None:\n\t\t\tformat_id_element = meta_xml.find(\"./formatId\")\n\n\t\t\tif format_id_element is not None:\n\t\t\t\tformat_path = formats[format_id_element.text]['formatPath']\n\n\t\tif format_path is None:\n\t\t\tprint \"\\t\\tFailed to extract metadata format from system metadata file. 
Continuing.\"\n\n\t\t\tcontinue\n\n\t\tobject_xml = getIdentifierObjectXML(base_url, document_identifier)\n\n\t\tif delay is not None:\n\t\t\ttime.sleep(delay)\n\n\n\t\tsysmeta_path = subdirectory_path + \"/sysmeta/xml\"\n\n\t\tif not os.path.exists(sysmeta_path):\n\t\t\tos.makedirs(sysmeta_path)\n\n\t\tif meta_xml is not None:\n\t\t\tET.ElementTree(meta_xml).write(sysmeta_path + \"/\" + str(i).rjust(5, '0') + \"-sysmeta.xml\")\n\n\t\tmetadata_path = subdirectory_path + \"/\" + format_path + \"/xml\"\n\n\t\tif not os.path.exists(metadata_path):\n\t\t\tos.makedirs(metadata_path)\n\n\t\tif object_xml is not None:\n\t\t\tET.ElementTree(object_xml).write(metadata_path + \"/\" + str(i).rjust(5, '0') + \"-metadata.xml\")", "def process_documents(self,\n doc_ids: Iterable[str],\n n_jobs: Optional[int] = None,\n ) -> List[Optional[Document]]:\n if n_jobs is None:\n n_jobs = self.n_jobs\n elif n_jobs == -1:\n n_jobs = os.cpu_count()\n elif n_jobs < -1:\n raise ValueError(f'n_jobs must be >= 0 or -1, but got {n_jobs}')\n if self.archive_handler is not None:\n assert n_jobs == 0\n with (self.archive_handler.open() if self.archive_handler else nullcontext()) as archive:\n process_document = partial(KyotoReader.process_document, self, archive=archive)\n if n_jobs > 0:\n with futures.ProcessPoolExecutor(max_workers=n_jobs) as executor:\n rets: Iterable[Optional[Document]] = executor.map(process_document, doc_ids)\n else:\n rets: Iterable[Optional[Document]] = map(process_document, doc_ids)\n return list(rets)", "def process_documents(session, endpoint, docs, id_map):\n for doc in docs:\n original_asset = doc['asset']\n\n if original_asset['name'] == '' or original_asset['name'] is None:\n LOG.warn('Skipping asset {} with empty name'.format(original_asset['id']))\n\n asset = {}\n asset.update(original_asset)\n del asset['id'] # since it is going to be different\n report = {'source_id': original_asset['id'], 'type': 'upload'}\n\n dest_id = id_map.get(original_asset['id'])\n\n already_exists = dest_id is not None\n if already_exists:\n url = endpoint + dest_id + '/'\n r = session.get(url)\n if r.status_code == 404:\n already_exists = False\n LOG.warn('asset {} not found (original id {})'.format(\n dest_id, original_asset['id']))\n\n if already_exists:\n report['method'] = 'PUT'\n report['url'] = url\n r = session.put(url, json=asset)\n else:\n report['method'] = 'POST'\n r = session.post(endpoint, json=asset)\n\n try:\n r.raise_for_status()\n except requests.HTTPError:\n LOG.error('Saving asset failed: %s', r.content)\n LOG.error('Original asset: %s', asset)\n report['error'] = r.content\n yield report\n continue\n\n response = r.json()\n LOG.info('Saved asset: %s as %s', original_asset['id'], response['id'])\n report['dest_id'] = response['id']\n yield report", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def save_to_db(self, collect_results):\n\n logger.debug(f'saving {len(collect_results)} report files to database')\n cihpc_mongo = db.CIHPCMongo.get_default()\n\n results = list()\n for item in collect_results:\n\n # save logs first\n if item.logs and item.items:\n log_ids = 
cihpc_mongo.files.insert_many(item.logs).inserted_ids\n logger.debug(f'inserted {len(log_ids)} files')\n item.update(log_ids)\n\n # insert rest to db\n if item.items:\n results.append(cihpc_mongo.reports.insert_many(item.items))\n logger.debug(f'inserted {len(results)} reports')\n return results", "def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)", "def flush(self):\n\n # save ddocs\n all_ddocs = self.all_docs(startkey=u\"_design\", endkey=u\"_design/\\u9999\", include_docs=True)\n ddocs = []\n for ddoc in all_ddocs:\n doc = ddoc['doc']\n old_atts = doc.get('_attachments', {})\n atts = {}\n for name, info in old_atts.items():\n att = {}\n att['content_type'] = info['content_type']\n att['data'] = self.fetch_attachment(ddoc['doc'], name)\n atts[name] = att\n\n # create a fresh doc\n doc.pop('_rev')\n doc['_attachments'] = resource.encode_attachments(atts)\n\n ddocs.append(doc)\n\n # delete db\n self.server.delete_db(self.dbname)\n\n # we let a chance to the system to sync\n times = 0\n while times < 10:\n if self.dbname in self.server:\n break\n time.sleep(0.2)\n times += 1\n\n # recreate db + ddocs\n self.server.create_db(self.dbname)\n self.bulk_save(ddocs)", "def test_scroll_returns_over_2x_size_docs(self):\n scroll_size = 3 # fetch N docs per \"scroll\"\n total_docs = (scroll_size * 2) + 1\n docs = self._index_many_new_docs(total_docs)\n self.assertEqual(len(docs), total_docs)\n self.assertEqual(docs_to_dict(docs),\n self._scroll_hits_dict({}, size=scroll_size))", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def clean_all_documents(cls):\n for index, text in enumerate(cls.documents):\n text_processed = cls.clean_document(text)\n cls.processed_documents.append(text_processed)", "def load_one_batch(adapter, nipt_results_path:str):\n \n batch_data = parse_batch_file(nipt_results_path)\n for sample in batch_data:\n mongo_sample = build_sample(sample)\n adapter.add_or_update_document(mongo_sample, adapter.sample_collection)\n mongo_batch = build_batch(batch_data[0])\n adapter.add_or_update_document(mongo_batch, adapter.batch_collection)", "def batch(self, request):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s/batch\" % self.url_index_name, self.client.timeout, request)", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = 
self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def create_doc_bulk(self, file: str, index: str) -> None:\n\n with open(file, \"r\") as f:\n bulk(self.es, self.make_documents(f, index))", "def save_documents(event, transcript_data):\n documents = [\n ('transcript_url', \"transcript\"),\n ('opening_statement_chair', \"chair opening statement\"),\n ('opening_statement_rm', \"ranking member opening statement\")\n ]\n\n for (field, note) in documents:\n url = transcript_data[field]\n save_document(url, note, event)", "async def create_many(\n self,\n documents: List[Dict[str, Any]],\n *,\n unique_key: Optional[str] = None,\n unique_filter: Optional[Dict[str, Any]] = None,\n session: Optional[Any] = None,\n **kwargs: Any,\n ) -> InsertManyResult:\n return await self._database.create_many(\n self.name,\n documents=documents,\n unique_key=unique_key,\n unique_filter=unique_filter,\n session=session,\n **kwargs,\n )", "def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")", "def training_documents(self, training_documents):\n self._training_documents = training_documents", "def test_get_documents_populated(index_with_documents):\n response = index_with_documents().get_documents()\n assert isinstance(response.results, list)\n assert len(response.results) == 20", "def documents(sources, source_type, include):\n with commit():\n if source_type == 'migrator-kit':\n import_documents_from_record_file(sources, include)\n else:\n import_documents_from_dump(\n sources=sources,\n source_type=source_type,\n eager=True,\n include=include\n )", "def add_documents_to_gensim_dictionary(gensim_dictionary_model, text):\n gensim_dictionary_model.add_documents(text)", "def test_add_many_objects_implicit_commit(self):\n\n # That one fails in r5 (<commit/> must be made on its own)\n\n doc_count = 10\n user_ids = [get_rand_string() for x in range(doc_count)]\n data = [get_rand_string() for x in range(doc_count)]\n ids = [get_rand_string() for x in range(doc_count)]\n documents = []\n for x in range(doc_count):\n doc = Document()\n doc['user_id'] = user_ids[x]\n doc['data'] = data[x]\n doc['id'] = ids[x]\n documents.append(doc)\n\n # Pass in the commit flag.\n self.conn.add(documents, True)\n\n results = []\n for id in ids:\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])", "def add(self, docs, commit=False):\r\n message = ElementTree.Element('add')\r\n for doc in docs:\r\n message.append(doc_to_elemtree(doc))\r\n m = ElementTree.tostring(message)\r\n response = self._update(m)\r\n if response.status != 200:\r\n raise SolrError(self._extract_error(response))\r\n # TODO: Supposedly, we can put a <commit /> element in the same post body\r\n # as the add element. 
That isn't working for some reason, and it would save us\r\n # an extra trip to the server. This works for now.\r\n if commit:\r\n self.commit()", "def retrieve_all_documents(\n self,\n collection_name: str,\n sort: List = [],\n asc: bool = True,\n include_vector: bool = True,\n include_fields: List = [],\n retrieve_chunk_size: int=1000,\n **kwargs\n ):\n num_of_docs = self.collection_stats(collection_name)['number_of_documents']\n with self.progress_bar(list(range(int(num_of_docs/ retrieve_chunk_size)))) as pbar:\n d = self.retrieve_documents(\n collection_name=collection_name, page_size=retrieve_chunk_size, sort=sort, asc=asc, include_vector=include_vector,\n include_fields=include_fields, **kwargs\n )\n all_docs = d[\"documents\"]\n pbar.update(1)\n while len(d[\"documents\"]) > 0:\n d = self.retrieve_documents(\n collection_name=collection_name,\n page_size=retrieve_chunk_size,\n cursor=d[\"cursor\"],\n sort=sort,\n asc=asc,\n include_vector=include_vector,\n include_fields=include_fields\n )\n all_docs += d[\"documents\"]\n pbar.update(1)\n return all_docs", "def add_url_content(query, documents):\n def scrape_and_update(doc):\n text = scrape(doc['url'])\n logger.debug('[SCRAPER]\\t Updating \"content\" for url %s', doc['url'])\n doc.update({'content': text})\n\n with ThreadPool(processes=10) as pool:\n for doc in documents:\n pool.apply_async(scrape_and_update, args=(doc,))\n pool.close()\n pool.join()\n\n # save the results for tests:\n mock_query_and_scraping.save_query_and_scraping_results(query, documents)", "def save_books(self,books):\n for book in books:\n self.save_book(book)" ]
[ "0.74635196", "0.68780833", "0.6735508", "0.66293705", "0.65308714", "0.64703435", "0.6461322", "0.6320846", "0.6297765", "0.6272216", "0.61952156", "0.618961", "0.6179138", "0.6159434", "0.61464745", "0.6131274", "0.6127692", "0.60197544", "0.60097003", "0.6002732", "0.59492266", "0.5928816", "0.5923453", "0.58978164", "0.589677", "0.5849082", "0.57858676", "0.5769239", "0.5731439", "0.5711891", "0.56931984", "0.5657347", "0.5654487", "0.56515706", "0.56505793", "0.5630766", "0.56246674", "0.5606292", "0.55824935", "0.5579383", "0.556257", "0.5555637", "0.553171", "0.5521033", "0.551346", "0.5511775", "0.548173", "0.54695386", "0.5457237", "0.54503185", "0.540772", "0.5405518", "0.5392169", "0.5386325", "0.53837734", "0.53832823", "0.5382851", "0.5368106", "0.53630924", "0.5347794", "0.53432447", "0.5338463", "0.53331834", "0.5317478", "0.5315519", "0.5291528", "0.52880883", "0.528746", "0.52854675", "0.527605", "0.5267764", "0.5260789", "0.52576995", "0.52454054", "0.5238945", "0.5227546", "0.52199", "0.52164656", "0.5205715", "0.5201928", "0.5193844", "0.51906544", "0.5188635", "0.5185937", "0.5170908", "0.51676077", "0.51547813", "0.51446277", "0.51416594", "0.51348156", "0.5123379", "0.5121274", "0.5120041", "0.5115607", "0.5102454", "0.5090106", "0.50856733", "0.50810915", "0.5066689", "0.50585014" ]
0.73529315
1
Loads a document database with the specified version from the directory.
def load(db_path="data/documents/trigrams", version=None): # create database at the desired path and with the desired version db = DocumentDatabase(db_path, version) # loop through batches for batch in db._get_batches(): # get the path to the stats file stats_file = db._get_batch_stat_file(batch) # load the stats stats_json = json.loads(codecs.open(stats_file, "rb", "utf-8").read()) # save in the batch statistics hash db.batch_stats[batch] = BatchStats(stats_json["total_sentences"]) # return the database return db
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_or_create_db(self):\n try:\n with open(self._filename, 'rb') as f:\n self.db = pickle.load(f)\n except FileNotFoundError:\n pass", "def load_DB(self):\n\t\tprint 'Loadind Data Base...'\n\t\tstream = open(self.DB_file)\n\t\tself.DB = cPickle.load(stream)\n\t\tstream.close()\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\tprint 'Loading completed'\n\t\treturn", "def load_db(path_to_db):\n db_run = db(path_to_db) # Instantiates the DB by reading the file\n db_run.import_config_db() # Imports configuration DB\n db_run.conn.row_factory = sqlite3.Row # Better select results\n return(db_run)", "def _load_vdb_with_mode(cls, vdb, mode): \n # {{\n db = anydbm.open(vdb.filename, mode)\n try:\n if db[\"--Reserved--type\"] != vdb.type:\n raise ValueError(\"Not a %s database\" % (vdb.type,))\n except KeyError:\n raise ValueError(\"Not a recognized database\")\n vdb.db = db\n # }}", "def openDB(self, dbpath, updateOnIdle=True):\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\"), updateOnIdle )", "def load_db(file):\n if os.path.isfile(file):\n try:\n start = time.time()\n db = []\n with open(file, 'r') as f:\n for item in json_lines.reader(f):\n db.append(item)\n stop = time.time() - start\n print(\"load_db time: \", stop, 'sec')\n return db\n except Exception as e:\n print(file, \"is probably corrupted. Creating empty db now...\")\n DbManager.erase_db(file)\n raise e\n\n else:\n # corrupt...\n print(\"database not found. creating new\")\n DbManager.new_db(file)", "def load(file_path = \"database.pkl\"):\n while True:\n doLoad = input(\"Database contents will be overwritten. Proceed? (y/n): \")\n if doLoad == 'y':\n break\n elif doLoad == 'n':\n return\n else:\n continue\n try:\n with open(file_path, 'rb') as f:\n global person_database\n person_database = pickle.load(f)\n print(\"Database loaded.\")\n except:\n print(\"Database not available.\")", "def load_db(dbpath):\n\n if not os.path.exists(dbpath):\n print(\"Cannot find %s directory, rerun from MacInfoPkg directory!\" % dbpath)\n sys.exit(1)\n\n db = []\n\n for root, dirs, files in os.walk(dbpath):\n for file in fnmatch.filter(files, '*.yaml'):\n path = os.path.join(root, file)\n with open(path, 'r') as fh:\n try:\n db.append(yaml.safe_load(fh))\n except yaml.YAMLError as e:\n print(\"Failed to parse file %s - %s\" % (path, e))\n sys.exit(1)\n\n if len(db) == 0:\n print(\"Empty database!\")\n sys.exit(1)\n\n # Sorting is required for fast lookup.\n return sorted(db, key=operator.itemgetter('SystemProductName'))", "def get_document(self, docid):\n try:\n return self.sql_session.query(Document).get(docid)\n except OperationalError:\n raise IOError(\"Sorry, this database is incompatible with the \"\n \"current version of Luminoso. 
If you want, you can \"\n \"delete the model directory and start again.\")", "def database(db):\n if type(db) is str:\n # Database name\n if db.endswith('.py'):\n # Python source, exec it\n globals = {}\n exec(compile(open(db).read(), db, 'exec'), globals)\n if 'DB' in globals:\n db = globals['DB']\n else:\n storage = globals['Storage']\n from ZODB.DB import DB\n db = DB(storage, cache_size=4000)\n elif db.endswith(\".fs\"):\n from ZODB.DB import DB\n from ZODB.FileStorage import FileStorage\n storage = FileStorage(db)\n db = DB(storage, cache_size=4000)\n\n # The following will fail unless the application has been configured.\n from zope.event import notify\n notify(zope.processlifetime.DatabaseOpened(db))\n\n return db", "def db_file():\n return abspath('vmchecker.db')", "def _load_document(path, app):\n start_inventor()\n document_type_enum = {\n 12289: 'UnnownDocument',\n 12290: 'PartDocument',\n 12291: 'AssemblyDocument',\n 12292: 'DrawingDocument',\n 12293: 'PresentationDocument',\n 12294: 'DesignElementDocument',\n 12295: 'ForeignModelDocument',\n 12296: 'SATFileDocument',\n 12297: 'NoDocument',\n }\n try:\n app.Documents.Open(str(path))\n document_type = document_type_enum[app.ActiveDocumentType]\n doc = win32com.client.CastTo(app.ActiveDocument, document_type)\n print(doc, document_type)\n return doc\n except:\n print('unable to load file')\n return None", "def test_load_database_from_path(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path_or_database=path, fast_logging=False)\n assert isinstance(database, DataBase)\n assert database.path is not None\n assert database.fast_logging is False", "def load_file():\n global list_of_table, data_base, new_data\n open_name = askopenfilename()\n\n if Path(open_name).suffix == '.db':\n data_base = open_name\n data_base = str(data_base)\n new_data_base = parse(data_base)\n new_data = update_list_tables(new_data_base)\n new_data.clear()\n\n else:\n mistake_db_file()", "def db_version():\n return IMPL.db_version()", "def load_db(db_file):\n db = {}\n logging.info('loading weighted vectors from {0}'.format(db_file))\n with open(db_file, 'r') as f:\n for line in f:\n j = json.loads(line)\n db.update(j)\n return db", "def _get_db(self, db_name: str) -> shelve.DbfilenameShelf:\n db_path = os.path.join(self.cache_folder, db_name)\n db = shelve.open(db_path)\n logging.info(f'Opened cache file {db_path!r}')\n return db", "def create_db(self, path: str) -> None:\n if os.path.isfile(path):\n self.db_path = path\n print(\"DB already exists\")\n return\n\n print(path)\n\n self.db_path = path\n\n print(\"Opening the base db\")\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'basedb.xml'), 'r') as f:\n base = f.read()\n print(\"Reading the base as {0}\".format(base))", "def database_file(file):\r\n fpath = path.join('databases', '{0}'.format(file))\r\n db_path = path.join(mod_path, fpath)\r\n return db_path", "def loadDatabase(database):\n for file_name in os.listdir(\"Users\"):\n chemin = os.path.join(\"Users\", file_name)\n key = file_name.lower()\n database[key]=pickle.load(open(chemin,\"rb\"))", "def import_db(import_file):\n import_data(import_file)", "def get_latest_version(db_path):\n\t\t\n\t\t# create a file system and return latest version\n\t\treturn VersionedFile(db_path).get_latest_version()", "def load_database(db_session, fixture):\n # TODO: the fixture file path controls\n\n # load the fixture\n datas = pickle.loads(fixture)\n db_session.add_all(datas)\n db_session.commit()\n print \"load database 
ok\"", "def loadDB(dir):\n\n try:\n infile = os.path.join(gbl.libPath, dir, mmadir)\n f=file(infile, \"rb\")\n f.readline() # Read/discard comment line\n g = pickle.load(f)\n f.close()\n return g\n except:\n pass\n\n return None", "def read_db(self):\n with open(self.filename, 'r') as database:\n data = json.load(database)\n self.data = data", "def load(self) -> None:\n doc_ref = self.doc_ref\n if not isinstance(doc_ref, DocumentReference):\n return\n\n doc = doc_ref.get()\n if doc.exists:\n self.load_storage_model(doc.to_dict())", "def read_db():\n\n # Look for database in the same folder as this script\n script_dir = os.path.dirname(os.path.realpath(__file__))\n db_filepath = os.path.join(script_dir, 'cn_loads_database.dat')\n\n db = None\n if os.path.isfile(db_filepath):\n with open(db_filepath, 'r') as f:\n db = yaml.load(f.read())\n if db == None:\n db = dict()\n else:\n db = dict()\n\n return db", "def get_db():\n with open(db_file) as f:\n db = json.load(f)\n return db", "def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()", "def __get_db(self, folder):\n db_dir = os.path.join(self.home, self.ibooks_doc_root, folder)\n db_fullname = None\n\n if not os.path.exists(self.tmp_dir):\n os.makedirs(self.tmp_dir)\n\n for dfile in os.listdir(db_dir):\n src = os.path.join(db_dir, dfile)\n dst = os.path.join(self.tmp_dir, dfile)\n shutil.copy(src, dst)\n if dfile.endswith(\".sqlite\"):\n db_fullname = dst\n \n return db_fullname", "def _load(\n database_connection_url: str, controllers: Iterable[CRUDController], **kwargs\n) -> pymongo.database.Database:\n logger.info(f'Connecting to \"{database_connection_url}\" ...')\n database_name = os.path.basename(database_connection_url)\n if database_connection_url.startswith(\"mongomock\"):\n import mongomock # This is a test dependency only\n\n client = mongomock.MongoClient(**kwargs)\n else:\n # Connect is false to avoid thread-race when connecting upon creation of MongoClient (No servers found yet)\n client = pymongo.MongoClient(\n database_connection_url, connect=kwargs.pop(\"connect\", False), **kwargs\n )\n if \"?\" in database_name: # Remove server options from the database name if any\n database_name = database_name[: database_name.index(\"?\")]\n logger.info(f\"Connecting to {database_name} database...\")\n base = client[database_name]\n server_info = client.server_info()\n if server_info:\n logger.debug(f\"Server information: {server_info}\")\n _server_versions.setdefault(base.name, server_info.get(\"version\", \"\"))\n logger.debug(f\"Creating models...\")\n for controller in controllers:\n link(controller, base)\n return base", "def upgrade_to_1():\n config.db.singletons.insert_one({'_id': 'version', 'database': 1})", "def populate_database(replace=False):\n print(\"Creating database from RDF cache\")\n if replace or not os.path.exists(db_name):\n if os.path.exists(rdf_dir):\n print('Exracting data from cataglog')\n gids = get_gids()\n cat = get_catalog(gids)\n print(\"Converting data into wide form\")\n cat_wide = get_catalog_wide(cat)\n print(\"Saving data to database\")\n save_catalog_to_db(cat_wide)\n else:\n print(\"No RDF cache. Run download-cache first.\")\n else:\n print(\"Database exists. 
To overwrite set '--replace True'\")", "def loadDocs(db, createAdmin=True):\n docs = collectDesignDocs()\n if createAdmin:\n docs[\"admins\"] = adminDoc\n docs[\"shiftspace\"] = adminUser\n for k, v in docs.items():\n print \"Loading %s\" % k\n db[k] = v\n print \"Design documents loaded.\"", "def set_db_file():\n\n return os.path.join(db_path, db_file)", "def __load_index(self):\n import os\n if not os.path.exists(self.__dir):\n filename=os.path.join(MY_STORE,self.__dir,INTERNAL_DB_FILE)\n else:\n filename=os.path.join(self.__dir,INTERNAL_DB_FILE)\n try:\n self.__handle = open(filename,self.__mode)\n except IOError, e:\n print 'Cannot create status file. Ensure you have permission to write'\n return False\n\n fcntl.flock(self.__handle.fileno(), fcntl.LOCK_EX)\n internal_db = dbm.open(filename, 'c', 0644 )\n self.__storage = shelve.Shelf(internal_db)\n return True", "def _load_version():\n version = session.get('version')\n\n if version is None:\n g.version = None\n else:\n g.version = version", "def open(self):\n if not self.filename:\n raise ValueError(\"Can only open on-disk databases\")\n self.db = dbm.open(self.filename, \"w\") #raises anydbm.error\n try:\n if self.db[\"--Reserved--type\"] != self.type:\n raise ValueError(\"Not a %s database\" % self.type)\n except KeyError:\n raise ValueError(\"Not a recognized database\")", "def openDB(self, dbpath, FskHz):\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\"), FskHz)", "def connect(filename=DATABASE_FILENAME):\n if not path.exists(filename):\n raise FileNotFoundError(\"Database file not found: \" + filename)\n with open(filename, 'r', encoding=\"utf-8\") as f:\n return Database(json.load(f))", "def build_from_file_name (database, data_path, folder=None, spec=None):\n # chop the extension off\n temp = database.split(PATH_DELIM)\n name = database[:-3]\n folder_name = None\n store_point = None\n\n if len(temp) != 1:\n folder_name = PATH_DELIM.join(temp[:-1])\n name = temp[-1][:-3]\n\n if folder_name is not None and not folder:\n search = folder_name\n if PATH_DELIM in folder_name:\n # we need to look recursively, but not yet\n search = folder_name.split(PATH_DELIM)[0]\n\n try:\n store_point = globals()[search.replace(\".db\", \"\")]\n except KeyError:\n pass\n\n if PATH_DELIM in folder_name:\n # now recurse\n searches = folder_name.split(PATH_DELIM)[1:]\n for search in searches:\n try:\n store_point = getattr(store_point, search.replace(\".db\", \"\"))\n except AttributeError:\n break\n\n elif folder:\n store_point = folder\n\n if spec is None:\n if store_point is not None and store_point.spec is not None:\n spec_obj = store_point.spec\n else:\n spec = name + \".spec\"\n if os.path.exists(os.path.join(data_path, spec)):\n spec_obj = parse_spec(os.path.join(data_path, spec))\n else:\n spec_obj = str\n else:\n spec_obj = spec\n\n dbfile = open(os.path.join(data_path, database), \"r\")\n dbfile_contents = [item.strip() for item in dbfile.read().strip().strip(\"%\").split(\"%\")]\n dbdata = [spec_obj(item) for item in dbfile_contents if not item.startswith(\"#\")]\n db = Database\n if hasattr(dbdata[0], 'weight'):\n db = WeightedDatabase\n\n this_db = db(name, dbdata)\n dbfile.close()\n\n if store_point:\n store_point.append(this_db)\n else:\n globals()[name] = this_db\n\n _dbobjects.append(this_db)", "def load_db(db_path):\n return pd.read_csv(db_path)", "def load_db(self, dbname, verbose=0, db=\"sqlite\"):\n\n if db == \"sqlite\":\n db_loader = pm.database.sqlite.load\n elif db == \"pickle\":\n db_loader = 
pm.database.pickle.load\n elif db == \"hdf5\":\n db_loader = pm.database.hdf5.load\n elif db == \"txt\":\n db_loader = pm.database.txt.load\n\n # Ignore annoying sqlite warnings\n warnings.simplefilter(\"ignore\", UserWarning)\n\n # Open database\n db = db_loader(dbname)\n\n # Create mcmc instance reading from the opened database\n self.mc = pm.MCMC(self.nodes_db.node, db=db, verbose=verbose)\n\n # Not sure if this does anything useful, but calling for good luck\n self.mc.restore_sampler_state()\n\n return self", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def open(self):\r\n if not self.filename:\r\n raise ValueError(\"Can only open on-disk databases\")\r\n self.db = anydbm.open(self.filename, \"w\") #raises anydbm.error\r\n try:\r\n if self.db[\"--Reserved--type\"] != self.type:\r\n raise ValueError(\"Not a %s database\" % self.type)\r\n except KeyError:\r\n raise ValueError(\"Not a recognized database\")", "def _set_database_version(db, version):\n if not isinstance(version, int):\n raise TypeError(\"Version must be integer, not %s : %s\" % (\n version, type(version)))\n create_metadata = \\\n \"CREATE TABLE %s (version INT)\" % METADATA_COLUMN_NAME\n execute_sql(db, create_metadata)\n insert_version = \\\n \"INSERT INTO %s VALUES (%s)\" % (METADATA_COLUMN_NAME, version)\n execute_sql(db, insert_version)", "def get_version(self, directory, version_file_name='.version'):\n if self.path_exists(directory) and (version_file_name in os.listdir(directory)):\n f = open(directory + '/' + version_file_name)\n version = f.read()\n f.close()\n return version\n return None", "def updateDoc(self, path):\n self.db.setDb(self.db_file)\n \n if not self.authd:\n self._authorize()\n \n db_row = self.db.getRowFromPath(path)\n if not db_row:\n return False\n \n resource_id = db_row[0]\n etag = db_row[1]\n title = db_row[2]\n \n ms = gdata.data.MediaSource(file_path=path, content_type=MIMETYPES['ODT'])\n doc = self.client.GetDoc(resource_id.replace(':', '%3A'))\n new_version = self.client.Update(doc, media_source=ms)\n print 'Document pushed:', new_version.GetAlternateLink().href\n \n self.db.resetEtag(new_version)", "def load_file_data_from_db(sip, base_path):\n my_entry = FSEntries(sip)\n md_object = add_collection_name(my_entry.md_info, base_path)\n return md_object", "def read_db():\n # read config file\n config = configparser.ConfigParser()\n config.read_file(open(\"options.cfg\"))\n\n return config['DEFAULT']['DatabaseFilename']", "def load_db(self) -> dict:\n try:\n self.logger.info(f'Loading the database from {self.db_file}')\n return json.loads(open(self.db_file, 'r').read())\n except FileNotFoundError:\n self.logger.info('Could not find existing database')\n return {}", "def load_database(database_type):\n f = open(\"database.p\", \"rb\")\n database = pickle.load(f)\n f.close()\n\n if database_type is \"dict\":\n return database\n elif database_type is \"list\":\n return database.values()", "def db_version(engine):\n return IMPL.db_version(engine)", "def init(dbname=\"shiftspace\"):\n server = core.server()\n if not server.__contains__(dbname):\n print \"Creating database %s.\" % dbname\n server.create(dbname)\n else:\n print \"%s database already exists.\" % dbname\n db = server[dbname]\n loadDocs(db)", "def open_db(filepath) -> str:\r\n name = path.basename(filepath)\r\n db = QSqlDatabase.addDatabase(\"QSQLITE\", connectionName=name)\r\n db.setDatabaseName(filepath)\r\n if db.open():\r\n return 
name\r\n else:\r\n return ''", "def get_db_path():\n return os.path.join(sys.path[0], \"my_db.db\")", "def __init__(self, db_file):\n self.db = TinyDB(db_file)\n # TODO: implement db files rotation, for now just replace all data\n self.db.truncate()\n # self.db_path = pathlib.Path(db_file)\n # self.db_file_name = self.db_path.name\n # self.db_dir = self.db_path.parent", "def load_document(self, file_type, file_name):\n\n status, output = commands.getstatusoutput(\"mongoimport -h %s -p %s -u %s -p %s -d %s -c %s --type %s --file %s --headerline\" % (self.host, self.port, self.username, self.password, self.db_name, self.collection_name, file_type, file_name)) \n print \"status is\", status\n print \"output is\", output", "def get_version_details(self, project_id, document_id, version=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' + str(document_id) + '/'\n if version is not None: \n param = {\n 'version': version\n }\n else:\n param = None\n response = zoho_http_client.get(url, self.details, param)\n return parser.get_documents(response)[0]", "def load_index(self, fn):\n name = fn.split('.pkl')[0]\n return utils.load_obj(name)", "def upgradedb(self, args):\n upgrade_db(args.dbfile)", "def stampdb(self, args):\n revision = REVISION_MAPPING[args.configversion]\n print(f\"Based on config version {args.configversion} \"\n f\"we think your results schema is version {revision} and are upgrading to it\")\n stamp_db(revision, args.dbfile)", "def get_db(file_path):\n db_new = not os.path.isfile(file_path)\n sqlite3_detect_types = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES\n db = sqlite3.connect(file_path, detect_types=sqlite3_detect_types)\n if db_new:\n create_db(db)\n return db", "async def reload_database(self, schema='conf/schema.sql'):\n with open(schema) as schema:\n await self.dao.build((schema.read()))", "def ensure_db(name, version):\n\n # generate db name from given script name\n head, tail = os.path.split(os.path.realpath(name))\n filename, _ = os.path.splitext(tail)\n dbname = '%s/__physlcache__/%s.db' % (head, filename)\n\n if not os.path.exists(dbname):\n # make sure directory exists\n dbdir = '%s/__physlcache__' % head\n if not os.path.exists(dbdir):\n os.makedirs(dbdir)\n\n # database does not exist, create\n db = sqlite3.connect(\n dbname, detect_types=sqlite3.PARSE_DECLTYPES)\n c = db.cursor()\n\n # create table 'version'\n c.execute(\"\"\"\n CREATE TABLE version (\n version INTEGER,\n created TEXT)\n \"\"\")\n c.execute(\"\"\"\n INSERT INTO version\n (version, created)\n VALUES(?, ?)\n \"\"\", (version, datetime.datetime.now()))\n\n # create table 'functions'\n c.execute(\"\"\"\n CREATE TABLE functions (\n funcname TEXT PRIMARY KEY,\n physl TEXT NOT NULL,\n ast TEXT NOT NULL)\n \"\"\")\n\n c.close()\n db.commit()\n\n return dbname, db, True\n\n # assume db exists, simply connect\n return dbname, sqlite3.connect(dbname), False", "def __init__(self, db_file):\n pass", "async def save_db_version(self, db_version: int):\n async with self.db.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(\"UPDATE db_version SET db_version = %s WHERE 1 LIMIT 1\", (db_version,))\n await conn.commit()", "def _load_version(cls, unpickler, version):\n model = unpickler.load()\n if version == 0:\n feature = model._state['features']\n model._state['output_column_name'] = 'extracted.' 
+ feature\n return model", "def load_directory_as_db(self, dir_path, db_name):\n load_dir = os.path.join(self.data_dir, dir_path)\n data_files = glob.glob(os.path.join(load_dir, '*.txt'))\n file_groups = defaultdict(list)\n for path in data_files:\n path_noext, _ = os.path.splitext(path)\n filename_noext = os.path.basename(path_noext)\n i = filename_noext.find('-')\n if i == -1:\n table_name = filename_noext\n else:\n table_name = filename_noext[:i]\n file_groups[table_name].append(path)\n\n for table_name in sorted(file_groups.keys()):\n register_name = '{}_{}'.format(db_name, table_name)\n data_files = file_groups[table_name]\n logger.info('REGISTERING {}:{}'.format(register_name, data_files))\n data_files = filter(lambda x: os.path.getsize(x) > 0, data_files)\n if self.load_tables and register_name not in self.load_tables:\n continue\n jdb = self.sql_context.read.json(data_files)\n jdb.printSchema()\n jdb.registerTempTable(register_name)", "def read():\n with open(DBNAME) as f:\n foo = pickle.loads(f.read())\n print foo", "def _init(self):\n if os.path.exists(self.fname):\n with open(self.fname, \"rb\") as fh:\n self.db = pickle.load(fh)\n else:\n self.db = {}\n print(\"DB loaded, len\", len(self.db))", "def loadDbFromDisk (self):\n currentThread=threading.currentThread()\n self._logIo(\"load-db-from-disk\").debug1(\"starting to load db. thread-id=%d\", currentThread.ident)\n\n startLoadingTime = time.time()\n if os.path.exists(self._dbFileFullNamePath):\n try:\n self._cidLastAccessTimeDict = a.infra.format.json.readFromFile(self._logIo, self._dbFileFullNamePath)\n self._logIo(\"read-db-from-disk\").debug1(\"loading db - reading from file time is: %.6f\", time.time() - startLoadingTime)\n startValidatingTime = time.time()\n self._scanLastAccessDictForJunk()\n self._logIo(\"validate-db\").debug1(\"loading db - validating data time is: %.6f\", time.time() - startValidatingTime)\n except Exception as ex:\n self._logIo(\"error-read-db-file\").error(\"error reading db file='%s'. exception: %s\", self._dbFileFullNamePath, ex)\n\n if os.path.exists(self._countersFileFullNamePath):\n try:\n countersData = a.infra.format.json.readFromFile(self._logIo, self._countersFileFullNamePath)\n \n # load prediction counters for presistency\n self.counters['numTotalPredictionRemovedBytes'] = countersData['numTotalPredictionRemovedBytes']\n except Exception as ex:\n self._logIo(\"error-read-counters-file\").error(\"error reading counters file='%s'. 
exception: %s\", self._countersFileFullNamePath, ex)\n self._logIo(\"read-db-from-disk\").debug1(\"loading db - total time is: %.6f\", time.time() - startLoadingTime)", "def load_from_json(path_to_db):\r\n with open(path_to_db, 'r') as fproc:\r\n data_ = json.load(fproc)\r\n \r\n return data_", "def gtfsdb_main(ctx, database):\n ctx.obj = dict()\n if not database and os.path.exists(DEFAULT_CONFIG_FILE):\n conf = json.load(open(DEFAULT_CONFIG_FILE, 'r'))\n database = conf['database']\n ctx.obj.update(dict(conf=conf))\n else:\n click.echo(\"No database selected!!\")\n sys.exit(1)\n ctx.obj.update(dict(database=Database(url=database), db_url=database))", "def load(self):\n basepath = os.path.dirname(os.path.abspath(__file__))\n filename = os.sep.join([basepath, c.FOLDER_JSON, c.FILE_GAME_VERSIONS])\n Handler.ALL_VERS_DATA = {} # reset known data; do not retain defunct information\n with open(filename, \"r\") as f:\n data = json.loads( f.read() )\n self.update(data)\n self._updated = False\n #for v,record in iteritems(Handler.ALL_VERS_DATA):\n # print(type(v), v)\n #for k,v in iteritems(record): ", "def update(self):\n if not os.path.exists(self._db_file):\n return\n with open(self._db_file, 'r') as fp:\n self.from_dict(json.load(fp), \"JSON\")", "def detect_database(database_path):\n if database_path[-3:] != '.db':\n return False\n if not os.path.exists(database_path):\n return False\n return True", "def GetDatabase(self):\r\n\r\n if self.database:\r\n return self.database\r\n \r\n if not os.path.exists(self.GetDataDir()):\r\n # Create the data folder, it still doesn't exist\r\n os.makedirs(self.GetDataDir())\r\n\r\n self.database = os.path.join(self.GetDataDir(), \"NDT_Database.db\")\r\n return self.database", "def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n self.update_widgets_values()", "def get_db():\n\tpath = get_path_db()\n\tif path is None:\n\t\tprint(\"\\n=> Info - Cannot fetch database yet because it has not been configured.\\n\")\n\telse:\n\t\tdb = SqliteExtDatabase(path)\n\t\treturn db", "def read_db(DB):\n db = kyotocabinet.DB()\n if not db.open(DB, kyotocabinet.DB.OWRITER | kyotocabinet.DB.OCREATE):\n sys.stderr.write('ERROR: failed to open: %s\\n' % db.error())\n return db", "def test_upper_version(self):\n filename = str(uuid.uuid4())\n _silentremove(filename)\n dburi = \"sqlite:///%s\" % filename\n db = connection.connect(dburi, create=True, verbose=False)\n dbversion = DBVersion()\n dbversion.version = CURRENT_DB_VERSION + 1\n dbversion.version_number = CURRENT_DB_VERSION + 1\n dbversion.version_timestamp = datetime.datetime.now().strftime(\"%s\")\n db.add(dbversion)\n db.commit()\n\n self.assertRaises(DBAdminError, db_current_version, db)\n self.assertRaises(DBAdminError, db_verify, db)\n\n _remove(filename)", "def database():\n return conf().database", "def open_database(url, db_format=None, context=None):\n\n if database_exists(url):\n\n if db_format == \"archive\":\n return ArchiveDB(url, context)\n elif db_format == \"anomaly\":\n return AnomolyDB(url, context)\n elif db_format == \"metadata\":\n return MetadataDB(url, context)\n return dataset.connect(url)\n\n elif db_format is not None:\n url_ = make_url(url)\n if url_.drivername == \"sqlite\":\n makedirs(dirname(url_.database), exist_ok=True)\n\n hdb = HypernetsDBBuilder(context)\n return hdb.create_db_template(url, db_format)\n\n return None", "def load(cls, backend, path, obj):\n b = backend\n 
id = obj['id']\n name = obj['name']\n desc = obj['description']\n url = obj['url']\n path = os.path.join(path, id)\n\n # Load the index.json file.\n idx = b.read_json(os.path.join(path, 'index.json'))\n if idx['ApiVersion'] != 0:\n return None\n\n versions = []\n for vsn_obj in idx['Versions']:\n # TODO: Maybe check if the version file exists?\n versions.append(dict(\n id=vsn_obj['Id'],\n name=vsn_obj['Name'],\n ))\n\n return cls(b, id, name, desc, url, path, versions)", "def initialize_database(db_config_file):\n with open(db_config_file, 'r') as f:\n db_config = yaml.load(f)\n client = MongoClient(\"mongodb://\" + db_config['user'] + \":\" + urllib.quote_plus(db_config['passwd']) + \"@\" +\n db_config['host'] + \":\" + str(db_config['port']) + \"/\" + db_config['db'])\n db = client[db_config['db']]\n collection = db[db_config['collection']]\n return collection", "def test_quest_load_version_fail(testing_quest_page):\n testing_quest_page.save()\n\n # fetch the data\n doc = testing_quest_page.doc_ref.get()\n data = testing_quest_page.storage_model.parse_obj(doc.to_dict())\n\n # mess with the version\n data.version = str(VersionInfo.parse(data.version).bump_major())\n testing_quest_page.doc_ref.set(data.dict())\n\n # try to load with the bad version\n with pytest.raises(QuestLoadError):\n testing_quest_page.load()\n\n # cleanup\n testing_quest_page.delete()", "def init_db(configuration):\n db = ZODB.config.databaseFromString(configuration)\n for init in IDBInitializer.subscription(db):\n init(db)\n return db", "def __init__(self, data_path='data', db_params=ideagens):\n my_path = path.abspath(data_path)\n self.path = my_path\n\n self.db_params = db_params\n self.db = get_db(self.db_params)", "def __init__(self, db_path, db_name):\n self.db_path = db_path\n self.db_name = db_name", "def init_db(database_url: str, fidesctl_config: FidesctlConfig) -> None:\n alembic_config = get_alembic_config(database_url)\n upgrade_db(alembic_config)\n load_default_taxonomy(fidesctl_config)", "def __init__(self, filename=None):\r\n BaseDB.__init__(self, filename, \"verifier\")", "def load_testdb(c, dbname=\"test_template\", fpath=\"tests/test_db.sql\"):\n default_env = {\n \"PATH\": os.environ[\"PATH\"],\n \"PYTHONPATH\": os.path.abspath(os.path.dirname(__file__)),\n \"LANG\": \"en_US.UTF-8\",\n \"POSTGRES_DB\": dbname,\n \"POSTGRES_HOST\": \"localhost\",\n \"POSTGRES_USER\": \"postgres\",\n \"POSTGRES_PORT\": \"5432\",\n }\n\n env = os.environ\n env.update(default_env)\n\n psql_command = (\n f'psql -h {default_env[\"POSTGRES_HOST\"]} '\n f'-p {default_env[\"POSTGRES_PORT\"]} '\n f'-U {default_env[\"POSTGRES_USER\"]}'\n )\n\n c.run(f'{psql_command} postgres -c \"drop database if exists {dbname}\";', env=env)\n c.run(f'{psql_command} postgres -c \"create database {dbname}\";', env=env)\n c.run(f\"{psql_command} {dbname} < {fpath}\", env=env)\n # update test db to the latest migrations\n c.run(f\"alembic -c ./alembic.ini upgrade head\", env=env)", "def build_DB(self, doc_files):\n\t\tcompteur=0\n\t\tdoc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\twhile os.path.exists(doc_name):\n\t\t doc=Doc(doc_name)\n\t\t self.DB.add_doc(doc)\n\t\t compteur+=1\n\t\t doc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\t#print self.DB.id2nbword\n\t\tself.dump_DB()", "def loadDatabase ():\n database = []\n # Open a file\n path = \"lyd/\"\n dirs = os.listdir( path )\n \n # This prints all of the files and directories\n for file in dirs:\n if file == 
\".DS_Store\": #Mac file\n continue\n songdict = {}\n print (file)\n Zxx = STFTsignal.getSTFTofFile(path + file) #STFT of the file\n #mean, eigen and weights are stored in dictionary songdict\n songdict[\"mean\"], songdict[\"eigen\"], songdict[\"weights\"] = PCA(Zxx)\n songdict[\"name\"] = file\n database.append (songdict) \n return database", "def load_dict_from_db(path_to_db):\n with open(path_to_db, mode='rb') as handle:\n result = pickle.loads(handle.read())\n\n return result", "def test_load_casefile_from_database():\n\n year = int(os.environ['TEST_YEAR'])\n month = int(os.environ['TEST_MONTH'])\n\n casefile = load_xml_from_archive(data_dir=os.environ['CASEFILE_DIR'],\n year=year, month=month, day=1, interval=1)\n\n assert isinstance(casefile, bytes)", "def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]", "def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)", "def load_document(url):\n files = {\n AnnotationWriter.JSONLD_CONTEXT: \"anno.jsonld\",\n AnnotationWriter.LDP_CONTEXT: \"ldp.jsonld\"\n }\n if url in files:\n base_path = os.path.join(os.path.split(__file__)[0], 'jsonld')\n jsonld_file = os.path.join(base_path, files[url])\n data = open(jsonld_file).read()\n doc = {\n \"contextUrl\": None,\n \"documentUrl\": url,\n \"document\": data\n }\n return doc\n else:\n return jsonld.load_document(url)" ]
[ "0.6015772", "0.59988827", "0.59276336", "0.5849796", "0.5840075", "0.5790545", "0.5701773", "0.56872916", "0.5661583", "0.56539536", "0.56524754", "0.5596583", "0.5581138", "0.5557227", "0.5549356", "0.5532614", "0.55302", "0.5490099", "0.54712546", "0.5464255", "0.5438158", "0.5426713", "0.541934", "0.5417999", "0.5407309", "0.5399795", "0.5398745", "0.539352", "0.5353477", "0.53529686", "0.5346994", "0.53467584", "0.53398526", "0.53284985", "0.53282034", "0.53214765", "0.5298798", "0.5292388", "0.52896833", "0.528312", "0.52768207", "0.5269408", "0.52645516", "0.52615625", "0.5260056", "0.52243364", "0.52011037", "0.518276", "0.51730597", "0.5170656", "0.5165385", "0.5159575", "0.51547194", "0.51507324", "0.5147589", "0.51467717", "0.5136005", "0.5119046", "0.5115588", "0.50965023", "0.50941056", "0.5086741", "0.5079562", "0.50569224", "0.50542593", "0.50486046", "0.5029753", "0.5028517", "0.5018496", "0.5005665", "0.5005152", "0.5003396", "0.50030583", "0.5002781", "0.49989808", "0.4995315", "0.49894994", "0.49869198", "0.49736863", "0.49649185", "0.4964564", "0.4961693", "0.49494588", "0.49430168", "0.49422926", "0.49375713", "0.4937461", "0.49374226", "0.4932831", "0.49326137", "0.49320707", "0.49311504", "0.49244592", "0.4909708", "0.4894484", "0.4892995", "0.48906985", "0.48844644", "0.4883165", "0.48831347" ]
0.7193331
0
Returns the latest version of the documents inventory at the specified path.
def get_latest_version(db_path): # create a file system and return latest version return VersionedFile(db_path).get_latest_version()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None", "async def get_local_version(self, path):\n return_value = ''\n if os.path.isfile(path):\n with open(path, 'r') as local:\n ret = re.compile(\n r\"^\\b(VERSION|__version__)\\s*=\\s*['\\\"](.*)['\\\"]\")\n for line in local.readlines():\n matcher = ret.match(line)\n if matcher:\n return_value = str(matcher.group(2))\n return return_value", "def get_version_from_recent_files(self):\n # full_path = self.fusion_prefs[\"LastCompFile\"]\n # return self.get_version_from_full_path(full_path)\n\n version = None\n rfm = RecentFileManager()\n\n try:\n recent_files = rfm[self.name]\n except KeyError:\n logger.debug('no recent files')\n recent_files = None\n\n if recent_files is not None:\n for i in range(len(recent_files)):\n version = self.get_version_from_full_path(recent_files[i])\n if version is not None:\n break\n\n logger.debug(\"version from recent files is: %s\" % version)\n\n return version", "def get_version(path=VERSION_PATH):\n namespace = {}\n exec(read(path), namespace)\n return namespace['get_version'](short=True)", "def getRowFromPath(self, path):\n query = \"SELECT resource_id, etag, title FROM docs WHERE local_path = ?\"\n res = self.db.execute(query, (path,)).fetchone()\n return res", "def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version", "def __queryLatest(versionsPath, versionPattern):\n version = 0\n patternParts = __splitVersionPattern(versionPattern)\n versionRegEx = \"^\"+patternParts['prefix']+\"[0-9]{\"+str(len(patternParts['padding']))+\",}\"+patternParts['suffix']+\"$\"\n\n # finding the latest version\n if os.path.exists(versionsPath):\n for directory in os.listdir(versionsPath):\n if re.match(versionRegEx, directory):\n version = max(\n int(verNumber(directory, versionPattern)),\n version\n )\n return version", "def get_file(self, path):\n return self.client._perform_raw(\n \"GET\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))", "def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n # get the latest possible Version instance by using the workspace path\n if version is None:\n version = self.get_version_from_project_dir()\n\n return version", "def get_current_version(self):\n #full_path = self._root.knob('name').value()\n full_path = os.path.normpath(\n self.comp.GetAttrs()['COMPS_FileName']\n ).replace('\\\\', '/')\n return self.get_version_from_full_path(full_path)", "def get_latest_file(path):\n try:\n latest_iteration = get_latest_iteration(path)\n return os.path.join(path, '{}_{}'.format(FILE_PREFIX, latest_iteration))\n except ValueError:\n return None", "def get_version(course_path):\r\n format_file = course_path / EXPORT_VERSION_FILE\r\n if not format_file.isfile():\r\n return 0\r\n with open(format_file, \"r\") as f:\r\n data = json.load(f)\r\n if EXPORT_VERSION_KEY in data:\r\n return data[EXPORT_VERSION_KEY]\r\n\r\n return None", "def get_version_details(self, project_id, document_id, version=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' + str(document_id) + '/'\n if version is not None: \n param = {\n 'version': version\n }\n else:\n param = None\n response = 
zoho_http_client.get(url, self.details, param)\n return parser.get_documents(response)[0]", "def versions(self, stored=False) -> List['RadsSolutionVersion']:\n\n if stored:\n fspath = self.storage.fspath(self.path)\n if not os.path.isdir(fspath):\n return [] # solution not in storage\n listing = []\n for path in os.listdir(fspath):\n if not os.path.isdir(os.path.join(fspath, path)):\n continue\n listing.append(path)\n else:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\").splitlines()\n return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)", "def getversion_nightly(path=None): # pragma: no cover\n if not path:\n path = _get_program_dir()\n\n with open(os.path.join(path, 'version')) as data:\n (tag, rev, date, hsh) = data.readlines()\n\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n\n if not date or not tag or not rev:\n raise VersionParseError\n return (tag, rev, date, hsh)", "def _get_version(self, identifier: Identifier,\n version: Optional[int] = None) -> DocMetadata:\n parent_path = self._get_parent_path(identifier=identifier,\n version=version)\n path = os.path.join(parent_path,\n (f'{identifier.filename}.abs' if not version\n else f'{identifier.filename}v{version}.abs'))\n return self.parse_abs_file(filename=path)", "def getVersion(self):\n self.getDocumentedObject().getVersion()", "def getRepoRev(self, path):\r\n\r\n if self.verbose:\r\n print(\"INFO : Getting info in {}\".format(path))\r\n\r\n rev = None\r\n with workInDirectory(path):\r\n\r\n rev_cmd_args = ['git', 'rev-parse', 'HEAD']\r\n\r\n if self.verbose:\r\n print(\"INFO : Running command : {}\".format(\" \".join(rev_cmd_args)))\r\n\r\n rev = SubProcessUtility.runCommand(rev_cmd_args)\r\n\r\n if rev == None:\r\n print(\"Unable to get revision for {}, make sure config is correct\".format(path))\r\n\r\n return rev", "def get_latest_saved(self):\n doc = (get_latest_released_app_doc(self.domain, self._id)\n or get_latest_build_doc(self.domain, self._id))\n return self.__class__.wrap(doc) if doc else None", "def get_version_from_full_path(cls, full_path):\n if full_path is None or full_path == \"\":\n return\n\n logger.debug(\"full_path: %s\" % full_path)\n # convert '\\\\' to '/'\n full_path = os.path.normpath(os.path.expandvars(full_path)).replace(\"\\\\\", \"/\")\n\n # trim repo path\n from stalker import Repository, Version\n\n os_independent_path = Repository.to_os_independent_path(full_path)\n\n # try to get a version with that info\n logger.debug(\"getting a version with path: %s\" % full_path)\n\n version = Version.query.filter(Version.full_path == os_independent_path).first()\n logger.debug(\"version: %s\" % version)\n return version", "def get_rev(self, docid):\n response = self._request_session.head(self._database_path(docid))\n try:\n response.raise_for_status()\n except HTTPError as e:\n if e.response.status_code == 404:\n raise ResourceNotFound\n raise\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag\n return response.headers['ETag'].strip('\"').lstrip('W/\"')", "def get_version(self, directory, version_file_name='.version'):\n if self.path_exists(directory) and (version_file_name in os.listdir(directory)):\n f = open(directory + '/' + version_file_name)\n version = f.read()\n f.close()\n return version\n return None", "def svn_rev_info(path): # pragma: no cover\n if not os.path.isdir(os.path.join(path, '.svn')):\n path = os.path.join(path, '..')\n\n _program_dir = path\n filename = 
os.path.join(_program_dir, '.svn/entries')\n if os.path.isfile(filename):\n with open(filename) as entries:\n version = entries.readline().strip()\n if version != '12':\n for _ in range(3):\n entries.readline()\n tag = entries.readline().strip()\n t = tag.split('://', 1)\n t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',\n '')\n tag = '[{}] {}'.format(*t)\n for _ in range(4):\n entries.readline()\n date = time.strptime(entries.readline()[:19],\n '%Y-%m-%dT%H:%M:%S')\n rev = entries.readline()[:-1]\n return tag, rev, date\n\n # We haven't found the information in entries file.\n # Use sqlite table for new entries format\n from sqlite3 import dbapi2 as sqlite\n with closing(\n sqlite.connect(os.path.join(_program_dir, '.svn/wc.db'))) as con:\n cur = con.cursor()\n cur.execute(\"\"\"select\nlocal_relpath, repos_path, revision, changed_date, checksum from nodes\norder by revision desc, changed_date desc\"\"\")\n _name, tag, rev, date, _checksum = cur.fetchone()\n cur.execute('select root from repository')\n tag, = cur.fetchone()\n\n tag = os.path.split(tag)[1]\n date = time.gmtime(date / 1_000_000)\n return tag, rev, date", "def getMostRecentPublishedEdit(self, show, sequence, version):\n\n # grab shot edits cache path\n mode = Mode(show, sequence)\n shotEditsCachePath = mode.get('[editorialFLEFilesCache]')\n\n # load the file and grab the published versions\n root = self.fileService.loadXMLFile(shotEditsCachePath)\n isPublished = lambda e: e.attrib['published'] == 'true'\n publishedEdits = filter(isPublished, root.getchildren())\n versions = map(lambda e: int(e.attrib['version']), publishedEdits)\n\n # drop edits after the requested version\n versions.sort(reverse=True)\n versions = list(itertools.dropwhile(lambda v: v >= int(version), versions))\n\n # return path to fle\n if len(versions) > 0:\n return flix.core2.shotCutList.ShotCutList.getDefaultPath(mode, versions[0])\n\n # couldn't find a valid publisehd version\n return None", "def ReadVersion():\n return _ReadNumericFile(pathutils.JOB_QUEUE_VERSION_FILE)", "def version(self):\n self._get_latest_content()\n return self._data.get('version', None)", "def updateDoc(self, path):\n self.db.setDb(self.db_file)\n \n if not self.authd:\n self._authorize()\n \n db_row = self.db.getRowFromPath(path)\n if not db_row:\n return False\n \n resource_id = db_row[0]\n etag = db_row[1]\n title = db_row[2]\n \n ms = gdata.data.MediaSource(file_path=path, content_type=MIMETYPES['ODT'])\n doc = self.client.GetDoc(resource_id.replace(':', '%3A'))\n new_version = self.client.Update(doc, media_source=ms)\n print 'Document pushed:', new_version.GetAlternateLink().href\n \n self.db.resetEtag(new_version)", "def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version", "def get_latest_revision(self):\n revision_list = self.get_revision_list()\n if revision_list:\n return revision_list[-1]\n else:\n raise NoRevisionsExistError()", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = 
max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def read_inventory_file():\n try:\n with open('inventory', 'r') as file:\n inventory = file.read()\n return inventory\n except OSError:\n pass", "def get_revision(path):\n return get_output(['hg', 'parent', '--template', '{node|short}'], cwd=path)", "def latest_product_version(product):\n return product.productversions.order_by(\"-created_on\").first()", "def get_last_revision(self):\n return self.index.get_index_revision(self.name)", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def read_version(path):\n version = None\n if os.path.exists(path):\n version = open(path, 'r', encoding='utf-8').read().strip()\n\n if version:\n return re.sub(r'^python-', '', version)\n\n return version", "def _get_file(self, path: str) -> Tuple[str, bytes]:\n self._trace(\"fetching: %s\" % path)\n meta, resp = self._connection.files_download(path)\n return (meta.rev, resp.content)", "def _get_latest_content(self):\n if self._modified is None:\n self._load_content()\n return\n\n # check if data updated\n statinfo = os.stat(self._path)\n if statinfo.st_mtime > self._modified:\n self._load_content()", "def _get_latest_chapter(self, directory):\n files = os.listdir(directory)\n if files:\n print(\"Last saved chapter: \", files[-1])\n last_chapter = files[-1][:-4]\n return self.indices.get(last_chapter, -1)\n return -1", "def getversion_onlinerepo(path: str = 'branches/master'):\n # Gerrit API responses include )]}' at the beginning,\n # make sure to strip it out\n buf = fetch(\n 'https://gerrit.wikimedia.org/r/projects/pywikibot%2Fcore/' + path,\n headers={'user-agent': '{pwb}'}).text[4:]\n try:\n return json.loads(buf)['revision']\n except Exception as e:\n raise VersionParseError(f'{e!r} while parsing {buf!r}')", "def get_latest_version(self, name):\n return self.filter(name=name).order_by('schema_version').last()", "def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]", "def get_latest_version(self, did, has_version=None):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == did)\n\n try:\n record = query.one()\n baseid = record.baseid\n except NoResultFound:\n baseid = did\n except MultipleResultsFound:\n raise MultipleRecordsFound('multiple records found')\n\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.baseid == baseid) \\\n .order_by(IndexRecord.created_date.desc())\n if has_version:\n query = query.filter(IndexRecord.version.isnot(None))\n record = query.first()\n if (not record):\n raise NoRecordFound('no record found')\n\n return record.to_document_dict()", "def get_revit_version_from_path(rvt_install_path):\n\n def LOWORD(dword):\n return dword & 0x0000ffff\n\n def HIWORD(dword):\n return dword >> 16\n\n pe = pefile.PE(rvt_install_path)\n ms = pe.VS_FIXEDFILEINFO.ProductVersionMS\n ls = pe.VS_FIXEDFILEINFO.ProductVersionLS\n return '20{}'.format(HIWORD(ms))", 
"def get_last_revision(filename):\n files = glob.glob(os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR,filename) + '-??.txt')\n if files:\n sorted_files = sorted(files)\n return get_revision(sorted_files[-1])\n else:\n raise Exception('last revision not found in archive')", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def which(self, name):\n if not len(self):\n self.update()\n try:\n return [version for version in self if os.path.basename(version) == name][0]\n except IndexError:\n return None", "def find_release_number():\n oa_version_files = [\n \"inventory/group_vars/all/all.yml\",\n \"group_vars/all/all.yml\",\n \"playbooks/inventory/group_vars/all.yml\",\n ]\n for filename in oa_version_files:\n try:\n with open(filename, \"r\") as vf:\n version = yaml.safe_load(vf)[\"openstack_release\"]\n found_file = filename\n break\n except FileNotFoundError:\n pass\n else:\n raise FileNotFoundError(\"No file found matching the list of files\")\n return version, found_file", "def _find_latest():\n try:\n db = get_master_collection()\n service_details = db.find({\"master.key\": \"release\"}).sort([(\"master.value\", pymongo.DESCENDING)]).limit(1)\n for service in service_details:\n for r in sorted(service[\"master\"][\"value\"], reverse=True):\n latest_release = r\n build_list = service[\"master\"][\"value\"][r]\n break\n break\n\n latest_rel_num = str(latest_release).replace(\"_\", \".\")\n build_list = _natural_sort(build_list)\n for build in build_list:\n latest_build = build\n break\n\n latest_build_num = latest_build\n second_latest_build_num = int(latest_build_num) - 1\n latest = {\"latest_val\": latest_rel_num + \"_\" + latest_build_num,\n \"second_latest_val\": latest_rel_num + \"_\" + str(second_latest_build_num)}\n except Exception as e:\n logger.error(\"Exception in _find_latest : \" + str(e))\n return latest", "def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None", "def get_latest_log(path_logs=None):\n path_logs = getOption('JournalPath')\n if not path_logs:\n user_path = environ.get('USERPROFILE')\n if user_path:\n path_logs = user_path + \"/Saved Games/Frontier Developments/Elite Dangerous\"\n from settings_api import setOption\n setOption('JournalPath', path_logs)\n else:\n raise FileNotFoundError(\"Journal path not found, define it in configs\")\n list_of_logs = [join(path_logs, f) for f in listdir(path_logs) if\n isfile(join(path_logs, f)) and f.startswith('Journal.')]\n if not list_of_logs:\n return None\n latest_log = max(list_of_logs, key=getmtime)\n return latest_log", "def get_version(self):\r\n\r\n return self.versions[0].number", "def get_version(self):\r\n if not self.endpoint_checker(self.endpointurl):\r\n raise Exception(\"Please use a valid ESRI REST url\")\r\n\r\n parsedurl = urlparse(self.endpointurl)\r\n print(f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\")\r\n req = requests.get(\r\n f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\"\r\n )\r\n\r\n if req.status_code == 200:\r\n try:\r\n return req.json()[\"currentVersion\"]\r\n except KeyError:\r\n try:\r\n req = requests.get(\r\n self.endpointurl.split(\"services/\")[0] + \"services/?f=pjson\"\r\n )\r\n 
return req.json()[\"currentVersion\"]\r\n except Exception as e:\r\n raise e\r\n raise Exception(\r\n f\"An Error occurred retrieving vital information, the response status {str(req.status_code)} associate with {req.json()['error']['message']}\"\r\n )", "def getversion_svn(path=None): # pragma: no cover\n _program_dir = path or _get_program_dir()\n tag, rev, date = svn_rev_info(_program_dir)\n hsh, date2 = github_svn_rev2hash(tag, rev)\n if date.tm_isdst >= 0 and date2.tm_isdst >= 0:\n assert date == date2, 'Date of version is not consistent'\n # date.tm_isdst is -1 means unknown state\n # compare its contents except daylight saving time status\n else:\n for i in range(len(date) - 1):\n assert date[i] == date2[i], 'Date of version is not consistent'\n\n rev = f's{rev}'\n if (not date or not tag or not rev) and not path:\n raise VersionParseError\n return (tag, rev, date, hsh)", "def get_versions_from_path(self, path):\n if not path:\n return []\n\n # convert '\\\\' to '/'\n path = os.path.normpath(path).replace(\"\\\\\", \"/\")\n from stalker import Repository\n\n os_independent_path = Repository.to_os_independent_path(path)\n logger.debug(\"os_independent_path: %s\" % os_independent_path)\n\n from stalker import Version\n from stalker.db.session import DBSession\n\n # try to get all versions with that info\n with DBSession.no_autoflush:\n versions = Version.query.filter(\n Version.full_path.startswith(os_independent_path)\n ).all()\n\n return versions", "def get_latest_version():\n found_version = \"unknown\"\n version_re = r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version", "def get_version(self, direc_path):\n try:\n archive = zipfile.ZipFile(direc_path, 'r')\n if u'cc/mallet/regression/' not in archive.namelist():\n return '2.0.7'\n else:\n return '2.0.8RC3'\n except Exception:\n\n xml_path = direc_path.split(\"bin\")[0]\n try:\n doc = et.parse(xml_path + \"pom.xml\").getroot()\n namespace = doc.tag[:doc.tag.index('}') + 1]\n return doc.find(namespace + 'version').text.split(\"-\")[0]\n except Exception:\n return \"Can't parse pom.xml version file\"", "def HACK_get_current_version_from_sysfs(coll_number):\n filename = \"/sys/fs/castle-fs/collections/{0}/version\".format(coll_number)\n try:\n with open(filename, 'r') as fd:\n for text in fd:\n return int(text, 16)\n except Exception, e:\n pycastle_log.error(\"Failed while trying to open {0} with exception {1}:{2}\".format(filename, type(e), e))\n raise", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def get_version(self, name: str, version=None) -> int:\n division, is_vertebrate = 
self.get_division(name)\n if version is None:\n latest_version = self.get_release(is_vertebrate)\n return latest_version\n\n if not str(version).isdecimal():\n raise TypeError(\"Version must be a number\")\n version = int(version)\n\n all_versions = self.get_releases(is_vertebrate)\n ensembl = f\"Ensembl{'' if is_vertebrate else 'Genomes'}\"\n if version not in all_versions:\n raise ValueError(\n f\"{ensembl} release version {version} \"\n f\"not found. Available versions: {all_versions}\"\n )\n\n releases = self.releases_with_assembly(name)\n if version not in releases:\n raise FileNotFoundError(\n f\"{name} not found on {ensembl} release {version}. \"\n f\"Available on release versions: {releases}\"\n )\n return version", "def get_revision_heaviest_tenant_one(database_file_path):\n # remove the line below in case you have implemented the query.\n raise NotImplementedError\n\n query = \"\"\"\n \"\"\"\n\n return _fetch_result_from_database(query, database_file_path)", "def get(self, filepath):\n try:\n collname = '%s.files' % self.bucketname\n coll = Collection(self.db, collname)\n if coll:\n doc = coll.find_one({'filename': str(filepath)}, sort=[('uploadDate', -1)])\n if doc:\n id = doc['_id']\n gout = self.gridfs.get(ObjectId(id))\n if gout:\n content = gout.read()\n gout.close()\n return content\n except Exception, e:\n print e\n return None", "def get_version_from_project_dir(self):\n versions = self.get_versions_from_path(self.project_directory)\n version = None\n\n if versions and len(versions):\n version = versions[0]\n\n return version", "def _get_latest_version(self, name: str) -> Environment:\n result = _get_latest(\n name,\n self._version_operations,\n self._resource_group_name,\n self._workspace_name,\n self._registry_name,\n )\n return Environment._from_rest_object(result)", "def find_version():\n _locals = locals()\n src_dir = os.path.abspath(os.path.dirname(__file__))\n version_file = os.path.join(src_dir, 'loudml', '_version.py')\n with io_open(version_file, mode='r') as fd:\n exec(fd.read()) # __version__ is set in the exec call.\n return _locals['__version__']", "def get_version():\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n version_path = os.path.join(current_dir, VERSION_FILE)\n\n with open(version_path, 'r') as version_fd:\n return version_fd.read().strip()", "def download_files(path):\n return edgar.download_index(path,2019,skip_all_present_except_last=False)", "def getVersion(self):\n try:\n filepath = f\"{EXTERNAL_DIRECTORY}/VERSION\"\n with open(filepath, \"r\") as file:\n lines = file.readlines()\n for line in lines:\n if line != \"\\n\":\n return line.replace(\"\\n\", \"\")\n\n\n except FileNotFoundError as e:\n _LOGGER.error(\"Could not find VERSION File.\")\n return None\n except Exception as e:\n _LOGGER.debug(\"Could not read program version file. 
Error message: %s\", e)\n return None", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def get_latest_version(self):\n study = self.source_study_version.study\n current_study_version = self.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same dataset associated with the current study version.\n try:\n current_dataset = SourceDataset.objects.get(\n source_study_version=current_study_version,\n i_accession=self.i_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_dataset", "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def GetOpenedFile(self,file):\n\t\tif isinstance(file,str):\n\t\t\tindex = self.OpenedFilenames.index(file)\n\t\telif isinstance(file,int):\n\t\t\tindex=file\n\t\telse:\n\t\t\traise PycomError('Type of file in GetOpenedFile is wrong ')\n\t\treturn self.acad.Documents.Item(index)", "def get_file(self, path):\n return self._files.get(self._get_rel_path(path))", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def getFile( self, par, path ):\n\n return self.db.getFilePar( par, path )", "def get(self):\n if self.request.params.get(\"download\"):\n return get_file(self.request)\n document = self.request.validated[\"document\"]\n document_data = document.serialize(\"view\")\n document_data[\"previousVersions\"] = [\n i.serialize(\"view\") for i in self.request.validated[\"documents\"] if i.url != document.url\n ]\n return {\"data\": document_data}", "def getDocument(cred, documentPath):\n url = cred.base_url + \"documents/\" + documentPath\n\n return makeRequest(cred, url, 'GET')", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def _version_test(self, archive_dir):\r\n root = os.listdir(archive_dir)\r\n course_directory = archive_dir / root[0]\r\n return get_version(course_directory)", "def test_get_versions_cached(self):\n versions = {\"foo-1.0.tar.gz\": \"../../packages/foo-1.0.tar.gz\"}\n self.index._save_index(\"foo\", versions)\n with patch(\"cheddar.index.remote.get\") as mocked:\n result = self.index.get_versions(\"foo\")\n eq_(result, versions)\n eq_(mocked.call_count, 0)", "def svn_fs_file_contents(*args):\r\n return _fs.svn_fs_file_contents(*args)", "def readFile(self, path):\n return self.session.request('diag/files/?q=%s'\n % (path))", "def available_version(self) -> Sequence['outputs.VersionResponse']:\n return pulumi.get(self, \"available_version\")", "def _get_via_app_bundle(self, path: pathlib.Path | str) -> str:\n\n path = pathlib.Path(path) / \"Contents\" / \"Info.plist\"\n\n if not path.exists():\n logger.warning(\n f\"Could not determine application version. 
Missing: {path}...\"\n )\n return \"?\"\n\n with open(path, \"rb\") as f:\n data = plistlib.load(f)\n\n bundle_short_version: str = data.get(\"CFBundleShortVersionString\", \"?\")\n bundle_version: str = data.get(\"CFBundleVersion\", None)\n\n if bundle_version is None:\n return f\"{bundle_short_version}\"\n\n return f\"{bundle_short_version}-{bundle_version}\"", "def get_machine_version():\n return get_file_content(\"/home/pi/.machineconfig/latest_version\")", "def open_inventorybook(filepath):\n path_exists = os.path.exists(filepath)\n inventorybook = None\n if path_exists:\n try: # safest way to open or close file.\n with open(filepath, 'r') as infile:\n inventorybook = json.load(infile)\n finally:\n infile.close()\n return inventorybook", "def read(self, path):\n client = self.connect(VAULT_TOKEN)\n return client.read(path)", "def get(self, path):\n\t\treturn self.cache.get(path)", "def version_or_exit(path):\n\n with cd(path):\n versioning_file = join(os.curdir, 'versioning.py')\n try:\n get_version = run_command(versioning_file)\n if get_version.returncode:\n abort(colors.red('versioning.py') + ' returned an error.')\n else:\n return get_version.stdout.strip()\n except OSError:\n abort(colors.red('versioning file not found: ') + versioning_file)", "def lookup(self, path):\n if path == '/':\n path = ''\n best_fit = self.retrieve_catalog_for_path(path)\n return best_fit.find_directory_entry(path)", "def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")", "def _read_version(rootdir: Path) -> Union[str, None]:\n version_file = rootdir.joinpath(_METADATA)\n if version_file.exists():\n with version_file.open(\"r\") as f:\n content = json.load(f)\n if _VERSION_KEY in content:\n return content[_VERSION_KEY]\n return None", "def next_available_version(self):\n pattern = \"{descriptor}_{task}_v*{ext}\".format(\n descriptor=self.descriptor, task=self.task, ext=self.extension)\n matching_scenefiles = []\n try:\n for file_ in self.folder_path.files():\n if file_.name.fnmatch(pattern):\n matching_scenefiles.append(file_)\n if not matching_scenefiles:\n return 1\n except OSError as err:\n return 1\n matching_scenefiles.sort()\n latest_scenefile = matching_scenefiles[-1]\n latest_version = latest_scenefile.name.stripext().split(\"_v\")[-1]\n return int(latest_version) + 1", "def _load_document(path, app):\n start_inventor()\n document_type_enum = {\n 12289: 'UnnownDocument',\n 12290: 'PartDocument',\n 12291: 'AssemblyDocument',\n 12292: 'DrawingDocument',\n 12293: 'PresentationDocument',\n 12294: 'DesignElementDocument',\n 12295: 'ForeignModelDocument',\n 12296: 'SATFileDocument',\n 12297: 'NoDocument',\n }\n try:\n app.Documents.Open(str(path))\n document_type = document_type_enum[app.ActiveDocumentType]\n doc = win32com.client.CastTo(app.ActiveDocument, document_type)\n print(doc, document_type)\n return doc\n except:\n print('unable to load file')\n return None" ]
[ "0.64565796", "0.59338427", "0.59115607", "0.5859803", "0.58483046", "0.5716326", "0.5692346", "0.5668634", "0.56481016", "0.5610619", "0.56099266", "0.556473", "0.5553882", "0.55534357", "0.5535717", "0.55330545", "0.55328286", "0.5523551", "0.547328", "0.54688823", "0.546578", "0.5459197", "0.54324514", "0.5426627", "0.5378384", "0.53709376", "0.5360439", "0.5353064", "0.53516084", "0.5306917", "0.5290965", "0.528", "0.5278892", "0.52682155", "0.52577996", "0.5249557", "0.52435154", "0.523928", "0.5234935", "0.5228499", "0.52203953", "0.52020514", "0.51975834", "0.5197127", "0.5175927", "0.5170648", "0.5170051", "0.51628", "0.5152734", "0.5149948", "0.5138725", "0.5138208", "0.513666", "0.51229924", "0.51129717", "0.51094675", "0.51008433", "0.5097971", "0.50938666", "0.5077288", "0.5068121", "0.5063152", "0.50631034", "0.50622404", "0.505975", "0.50555384", "0.50550944", "0.50526917", "0.50451994", "0.503612", "0.5035761", "0.5035266", "0.5024483", "0.5024068", "0.50106037", "0.5009847", "0.49997815", "0.49986377", "0.499317", "0.49889237", "0.49864715", "0.49826917", "0.4981922", "0.4976458", "0.49681738", "0.49542904", "0.49536455", "0.49529764", "0.49520686", "0.495094", "0.4946998", "0.49280745", "0.49254602", "0.4919712", "0.49179956", "0.49167588", "0.49134913", "0.49117422", "0.49075255", "0.4899317" ]
0.67488974
0
Returns a JSON representation of ``BatchStats``.
def to_json(self):
    return {
        "total_sentences": self.total_sentences
    }
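A minimal usage sketch for a to_json method of this shape; the BatchStats constructor and the sentence count below are assumptions made purely for illustration, and json.dumps is what turns the returned dict into the actual JSON string.

import json

class BatchStats:
    # Assumed minimal container; only to_json mirrors the snippet above.
    def __init__(self, total_sentences=0):
        self.total_sentences = total_sentences

    def to_json(self):
        return {
            "total_sentences": self.total_sentences
        }

stats = BatchStats(total_sentences=128)
print(json.dumps(stats.to_json()))  # -> {"total_sentences": 128}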
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_batch_stats(self, batch):\n\t\t\n\t\treturn self.batch_stats[batch]", "def stats_to_json(self):\n return json.dumps({'name': self.name,\n 'points': self.points,\n 'field_researchers': self.field_researchers,\n 'control_team': self.control_team,\n 'virus_understanding': self.virus_understanding,\n 'cure_research': self.cure_research,\n 'public_awareness': self.public_awareness,\n 'disease_control': self.disease_control\n }, sort_keys=True, indent=2)", "def summarize_as_json(self):\n return json.dumps({\n 'total_time': self.total_time,\n 'std_dev_total_time': self.std_dev_total_time,\n 'max_memory': self.max_memory,\n 'std_dev_max_memory': self.std_dev_max_memory,\n 'average_memory': self.average_memory,\n 'std_dev_average_memory': self.std_dev_average_memory,\n 'average_cpu': self.average_cpu,\n 'std_dev_average_cpu': self.std_dev_average_cpu,\n }, indent=2)", "def stats(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/stats\" %\n (self.sessionid, self.name))\n return self.server.json_body(resp)", "def to_json(self) -> Text:\n options_dict = copy.copy(self.__dict__)\n options_dict[_TYPE_NAME_KEY] = 'StatsOptions'\n if options_dict['_slice_functions'] is not None:\n raise ValueError(\n 'StatsOptions cannot be converted with experimental_slice_functions.'\n )\n if options_dict['_generators'] is not None:\n raise ValueError(\n 'StatsOptions cannot be converted with generators.'\n )\n if self.schema is not None:\n del options_dict['_schema']\n options_dict[_SCHEMA_JSON_KEY] = json_format.MessageToJson(self.schema)\n if self.slicing_config is not None:\n del options_dict['_slicing_config']\n options_dict[_SLICING_CONFIG_JSON_KEY] = json_format.MessageToJson(\n self.slicing_config)\n if self._per_feature_weight_override is not None:\n del options_dict['_per_feature_weight_override']\n options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY] = {\n k.to_json(): v for k, v in self._per_feature_weight_override.items()\n }\n return json.dumps(options_dict)", "def response(self):\n response = {}\n if self.stats is not None:\n response = self.stats\n\n return json.dumps(response)", "def print_batch_stats(self):\n\n # current epoch time, numfiles, numbytes, trans secs, status\n print(f\"TRANS_STATS_BATCH: {time.time()} {self.batchvals['transfer_name']} {self.batchvals['numfiles']} {self.filevals['totbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def stats_to_json(self, sys_config):\n return json.dumps(self.stats[dict_to_key(sys_config)], sort_keys=True, indent=4)", "def test_get_batch_statistics_request(self):\n self.trans_details.get_batch_statistics(\n batch_id = 123456,\n )", "def stats():\n return jsonify(shorten.get_stats(get_db(), app.config['MINI_URL_BASE']))", "def batch_to_dict(batch: BatchTrial) -> Dict[str, Any]:\n return {\n \"__type\": batch.__class__.__name__,\n \"index\": batch.index,\n \"trial_type\": batch.trial_type,\n \"ttl_seconds\": batch.ttl_seconds,\n \"status\": batch.status,\n \"status_quo\": batch.status_quo,\n \"status_quo_weight_override\": batch._status_quo_weight_override,\n \"time_created\": batch.time_created,\n \"time_completed\": batch.time_completed,\n \"time_staged\": batch.time_staged,\n \"time_run_started\": batch.time_run_started,\n \"abandoned_reason\": batch.abandoned_reason,\n \"run_metadata\": batch.run_metadata,\n \"stop_metadata\": batch.stop_metadata,\n \"generator_run_structs\": batch.generator_run_structs,\n \"runner\": batch.runner,\n \"abandoned_arms_metadata\": 
batch._abandoned_arms_metadata,\n \"num_arms_created\": batch._num_arms_created,\n \"optimize_for_power\": batch.optimize_for_power,\n \"generation_step_index\": batch._generation_step_index,\n \"properties\": batch._properties,\n }", "def _batch_to_json(self, batch, lengths):\n outputs = []\n cursor = 0\n for length in lengths:\n cursor_end = cursor + length\n\n mini_batch = batch[cursor:cursor_end]\n outputs.append(self._to_json(mini_batch))\n\n cursor = cursor_end\n return outputs", "def stats(self):\n url = client.build_url('stats')\n _, res_json = client.get(url, headers=self.headers)\n\n return res_json", "def __repr__(self):\n result = json.dumps({'processed': self._processed,\n 'failed': self._failed,\n 'total': self._total,\n 'time': str(self._time),\n 'chunk': self._chunk})\n return result", "def stats_json(self) -> Dict[str, Any]:\n return {\n name: function(self) for name, function in database_statistics_registry\n }", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))", "def print_json(results):\r\n import json\r\n stats = calc_stats(results)\r\n print(json.dumps(stats._asdict()))", "def to_json(self):\n avg_height = round(sum(player.height_cm for player in self.__players) / len(self.__players), 1)\n\n return json.dumps({\n 'Players': [player.to_json() for player in self.__players],\n 'AveragePPG': round(sum(player.ppg for player in self.__players) / len(self.__players), 2),\n 'Leaders': [{self.__ranks[i]: p.full_name, 'PPG': p.ppg} for i, p in enumerate(self.__players[:len(\n self.__ranks)])],\n 'Count': {pos: sum(1 for p in self.__players if p.position == pos) for pos in self.__unique_position},\n 'AverageHeight': f'{avg_height} cm'\n })", "def get(self, request):\n query = Stats()\n stats = query.get_format_stats()\n return Response(stats)", "def dumps(self):\n return json.dumps(str((self.__quantile, self.__maxx)))", "def print_json(results, number, concurrency):\n import json\n stats = calc_stats(results, number, concurrency)\n print(json.dumps(stats))", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def stats():\n stats = {\n \"Amenity\": \"amenities\",\n \"City\": \"cities\",\n \"Place\": \"places\",\n \"Review\": \"reviews\",\n \"State\": \"states\",\n \"User\": \"users\"\n }\n\n stat = {name: storage.count(obj) for obj, name in stats.items()}\n return jsonify(stat)", "def get_stats(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/stats\"\n\n _response = self.connector.http_call(\"get\", _url)\n\n # Update object\n self.stats = _response.json()", "def stats(self) -> Dict:\n return self._stats", "def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"", "def get_stats(self):\n stats = \\\n 'cluster: %s\\ncount = %d, size = %d, minvar = %f, avg_dist = %s\\n'\\\n % (self.name, self.count, self.size, self.minvar, self.avg_dist)\n return stats", "def gives_stats():\n dict_count = {\n \"amenities\": storage.count(Amenity),\n \"cities\": storage.count(City),\n \"places\": storage.count(Place),\n \"reviews\": storage.count(Review),\n \"states\": storage.count(State),\n \"users\": storage.count(User)\n }\n return jsonify(dict_count)", "def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)", "def get_stats(prefix):\n status = 200\n return flask.Response(get_stats_json(prefix),\n status=status,\n mimetype='application/json')", "def get_plant_batch_stats(db_path: str) -> int:\n return get_db_count(db_path, 
'batches.db', 'batches')", "def job_level_to_human_readable(batch):\n response = {\n \"batchId\": batch[Attributes.BATCH_ID],\n \"batchStatus\": batch[Attributes.BATCH_STATUS],\n \"labelingJobName\": batch[Attributes.LABELING_JOB_NAME],\n \"labelAttributeName\": batch[Attributes.LABEL_ATTRIBUTE_NAME],\n \"labelCategoryS3Uri\": batch[Attributes.LABEL_CATEGORY_CONFIG],\n \"jobInputS3Uri\": batch[Attributes.JOB_INPUT_LOCATION],\n \"jobInputS3Url\": create_presigned_url(batch[Attributes.JOB_INPUT_LOCATION]),\n \"jobOutputS3Uri\": batch[Attributes.JOB_OUTPUT_LOCATION],\n \"jobOutputS3Url\": create_presigned_url(batch[Attributes.JOB_OUTPUT_LOCATION]),\n }\n\n num_frames = batch.get(Attributes.NUM_CHILD_BATCHES)\n num_frames_completed = batch.get(Attributes.NUM_CHILD_BATCHES_COMPLETE)\n if num_frames is not None and num_frames_completed is not None:\n response[\"numFrames\"] = num_frames\n response[\"numFramesCompleted\"] = num_frames_completed\n\n return response", "def to_json(self):\n payload = {\n \"btc_addr\": self.btc_addr,\n \"last_seen\": (datetime.utcnow() - self.last_seen).seconds,\n \"height\": self.height\n }\n return json.dumps(payload)", "def to_json(self):\n return json.dumps({'messages': self.messages},\n default=lambda i: i.__dict__)", "def getFormattedJobStatistics(self):\n\t\tformatted_job_stats = [self.name]\n\t\tformatted_job_stats.append(str(self.retry_count))\n\t\tif self.site is None:\n\t\t\tformatted_job_stats.append('-')\n\t\telse:\n\t\t\tformatted_job_stats.append(self.site)\n\t\tformatted_job_stats.append(round_to_str(self.kickstart))\n\t\tformatted_job_stats.append(round_to_str(self.post))\n\t\tformatted_job_stats.append(round_to_str(self.condor_delay))\n\t\tformatted_job_stats.append(round_to_str(self.resource))\n\t\tformatted_job_stats.append(round_to_str(self.runtime))\n\t\tformatted_job_stats.append(round_to_str(self.seqexec))\n\t\tformatted_job_stats.append(round_to_str(self.seqexec_delay))\n\t\treturn formatted_job_stats", "def to_json(self):\n return json.dumps(self.for_json())", "def get_stats(self):\n return self.stats", "def to_json(self):\n return {\n \"sent_maxlen\": self.sent_maxlen,\n \"batch_size\": self.batch_size,\n \"seed\": self.seed,\n \"sep\": self.sep,\n \"classes\": list(self.classes_()),\n \"hidden_units\": self.hidden_units,\n \"trainable_emb\": self.trainable_emb,\n \"emb_dropout\": self.emb_dropout,\n \"num_of_latent_layers\": self.num_of_latent_layers,\n \"epochs\": self.epochs,\n \"pred_dropout\": self.pred_dropout,\n \"emb_filename\": self.emb_filename,\n \"pos_tag_embedding_size\": self.pos_tag_embedding_size,\n }", "def summaryJSON(self, filename=None):\n d = self.robotGridSummaryDict()\n if filename is not None:\n with open(filename, \"w\") as f:\n json.dump(d, f, separators=(',', ':'))\n else:\n return json.dumps(d)", "def to_json(self):\n pass", "def ajax_get_statistics():\r\n return jsonify(generate_statistics())", "def batch_info():\n return BatchInfo(\"UFG Hackathon\")", "def to_json(self):\n return {\n \"sent_maxlen\": self.sent_maxlen,\n \"batch_size\": self.batch_size,\n \"seed\": self.seed,\n \"classes\": list(self.classes_()),\n \"hidden_units\": self.hidden_units,\n \"trainable_emb\": self.trainable_emb,\n \"emb_dropout\": self.emb_dropout,\n \"num_of_latent_layers\": self.num_of_latent_layers,\n \"epochs\": self.epochs,\n \"pred_dropout\": self.pred_dropout,\n \"emb_filename\": self.emb_filename,\n \"pos_tag_embedding_size\": self.pos_tag_embedding_size,\n \"model_name\":self.model_name\n }", "def write(self, 
output='jsonstat'):\n\n if output == 'jsonstat':\n return json.dumps(OrderedDict(self), cls=NumpyEncoder)\n elif output == 'dataframe':\n return get_dim_label(self, self['label'], 'dimension')\n else:\n raise ValueError(\"Allowed arguments are 'jsonstat' or 'dataframe'\")", "def getStats(self):\n\n raise NotImplementedError", "def to_json(self):\n return {\n \"version\": 2,\n \"index_values\": self.counts_compressed(),\n \"values\": None,\n \"offset\": self.offset,\n \"start\": self.start,\n \"stop\": self.stop,\n \"step\": self.step,\n }", "def to_json(self) -> str:\n data_dict = self._to_list_dict()\n return json.dumps(data_dict, indent=4, cls=NumpyEncoder)", "def get_stats():\r\n stats = {\r\n \"progress_precent\": 100.0*finished_work_units_amount/work_units_amount,\r\n \"results\": None if work_status == Db.WorkStatusNames.finished_work.value else Db.collect_results(),\r\n #If it's already finished, then all the results were already sent to the main server.\r\n }\r\n return stats", "def report_json(self):\n # type: () -> Optional[AnyStr]\n return json.dumps(self.gen_report(as_dict=True), indent=4)", "def stats(self):\n return self._stats", "def get_json(self):\n return {\n \"power\": self.get_power(), \n \"timestamp\": self.get_timestamp(), \n \"shortage\": self.get_shortage()\n }", "def build_batch_stats():\n\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n # Copy for better stability.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n\n return mean, variance", "def build_batch_stats():\n\n # Copy for better stability.\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n second_moment = variance + tf.square(mean)\n\n return mean, variance, second_moment", "def dbstats_api():\n if not config.DEBUG:\n limit_to_localhost()\n\n return jsonify(status='ok', stats=sqlalchemy_pool_status()) # cant be async, used by the reboot script", "def to_json(self):\n return json.dumps(self, default=json_converter, indent=2)", "def json(self, update=False):\n return json.dumps(self.export(update=update), indent=4)", "def msgStats():\n r = {}\n r[\"users\"] = User.count()\n return jsonify(r)", "def get_stats():\n logger.info(\"Retrieving stats\")\n # create datetime iso format zero hour offset\n current_datetime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n # if filename doesn't exist\n if not path.exists(filename):\n return \"Statistics do not exist\", 404\n\n # get current stats\n with open(filename, 'r') as f:\n currentstats = json.loads(f.read())\n\n # return json\n stats_obj = {}\n stats_obj[\"num_users\"] = currentstats[\"num_users\"]\n stats_obj[\"num_facts\"] = currentstats[\"num_facts\"]\n stats_obj[\"most_popular_tag\"] = currentstats[\"most_popular_tag\"]\n # stats_obj[\"avg_jokes_added_weekly\"] = 
currentstats[\"avg_jokes_added_weekly\"]\n stats_obj[\"num_subscribed_users\"] = currentstats[\"num_subscribed_users\"]\n stats_obj[\"datetime\"] = current_datetime\n\n logger.debug(stats_obj)\n logger.info(\"Returning stats\")\n return stats_obj, 200", "def as_json(self):\n # if we don't convert it to a dict we'll get a whole bunch of 'can't be serialized' things\n # match = self.__dict__\n # match.pop('_sa_instance_state', None)\n # for k in match:\n #\n # match['date'] = match['date'].isoformat()\n m = self.__dict__\n m['explosions'] = self.explosions.all()\n m['deaths'] = self.deaths.all()\n m['antagobjs'] = self.antagobjs.all()\n m['uplinkbuys'] = self.uplinkbuys.all()\n m['badassbuys'] = self.badassbuy.all()\n m['populationstats'] = self.populationstats.all()\n\n return dict_to_json(m)", "def serialize(self):\n return {\n \"student\": self.student.serialize,\n \"class_rank\": self.class_rank,\n \"hist_rank\": self.hist_rank,\n \"strongest_sub\": {\n \"field\": self.strongest_sub.serialize,\n \"avg\": self.strongest_sub_avg\n },\n \"weakest_sub\": {\n \"field\": self.weakest_sub.serialize,\n \"avg\": self.weakest_sub_avg\n }\n }", "def get_stats(self):\n return self.manager.get_stats(self)", "def to_json(self):\r\n\r\n object_json = dict()\r\n object_json[\"Type\"] = self.__class__.__name__\r\n game_json = dict()\r\n game_json[\"x_dist\"] = self.x_dist\r\n game_json[\"y_dist\"] = self.y_dist\r\n game_json[\"turn_number\"] = self.turn_number\r\n game_json[\"max_turns\"] = self.max_turns\r\n game_json[\"num_to_win\"] = self.num_to_win\r\n game_json[\"winner\"] = self.winner\r\n game_json[\"board\"] = self.board.to_json()\r\n game_json[\"board_history\"] = [board.to_json() for board in self.board_history]\r\n game_json[\"players\"] = [player.to_json() for player in self.players]\r\n object_json[\"Object\"] = game_json\r\n\r\n return json.dumps(object_json)", "def stats(self):\r\n return {}", "def to_json(self, *args, **kwargs):\n data = self.to_dict()\n\n return json_util.dumps(data)", "def to_json(self):\n return json.dumps(self.__dict__)", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def dumps(self) -> bytes:\n return json.dumps(\n {\n 'training': {'timestamp': self._strftime(self.training.timestamp), 'ordinal': self.training.ordinal},\n 'tuning': {'timestamp': self._strftime(self.tuning.timestamp), 'score': self.tuning.score},\n 'states': [str(s) for s in self.states],\n },\n indent=4,\n ).encode('utf-8')", "def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }", "def json_out(db, options):\n stats = {\"stats\": basic_stats(db)}\n stats['logins_per_rp'] = db['rp']\n if options.quiet:\n print(dumps(stats, separators=(',', ':')))\n else:\n print(dumps(stats, indent=2, separators=(',', ': ')))", "def to_json(self):\n\n scaler_json=self.__dict__.copy()\n scaler_json['scale_']=scaler_json['scale_'].tolist()\n scaler_json['min_']=scaler_json['min_'].tolist()\n scaler_json['data_min_']=scaler_json['data_min_'].tolist()\n scaler_json['data_max_']=scaler_json['data_max_'].tolist()\n scaler_json['data_range_']=scaler_json['data_range_'].tolist()\n\n return json.dumps(scaler_json)", "def to_json(self) -> JSON:\n pass", "def as_json(self):\n return json.dumps(self.as_dict(), indent=2, sort_keys=False)", "def create_json_report(output):\n # Initial work, just dump mia_metrics and dummy_metrics into a json 
structure\n return json.dumps(output, cls=NumpyArrayEncoder)", "def stats(self, **kwargs):\n return stats.stats(self._host, self._session, **kwargs)", "def to_json_string(self):\n return json.dumps(dict(self), indent=2, sort_keys=True) + \"\\n\"", "def stats():\n class_counts = {}\n convert_dict = {\n 'Amenity': 'amenities',\n 'State': 'states',\n 'City': 'cities',\n 'User': 'users',\n 'Place': 'places',\n 'Review': 'reviews'\n }\n\n for _class in convert_dict.keys():\n class_counts[convert_dict[_class]] = storage.count(_class)\n\n return jsonify(class_counts)", "def as_json(self) -> str:\n return json.dumps(self, cls=_RecordingJSONEncoder)", "def to_json(self):\n return {\n \"active\": self.active,\n \"started\": self.started,\n \"finished\": self.finished,\n \"id\": self.monitor_id,\n \"waitTime\": self.wait_time,\n \"runInterval\": self.run_interval,\n }", "def to_json(self):\r\n # pylint: disable=no-member\r\n return {\r\n \"id\": self.id,\r\n \"name\": self.name,\r\n \"version\": Group.VERSION\r\n }", "def cls2json(self):\n return json.dumps(self.__dict__)", "def cls2json(self):\n return json.dumps(self.__dict__)", "def to_json(self) -> str:\n return json.dumps(asdict(self))", "def tojson(self):\n return json.dumps(self.jsonable())", "def to_json(self):\n return json.dumps(self._asdict())", "def convert_to_db_json(self):\n data = super().convert_to_db_json()\n oltpbench_data = {\n 'benchmark_type': self.type,\n 'query_mode': self.parameters.query_mode,\n 'scale_factor': self.parameters.scale_factor,\n 'terminals': self.parameters.terminals,\n 'client_time': self.parameters.client_time,\n 'weights': convert_weights_to_dict(self.parameters.transaction_weights),\n 'wal_device': self.metadata.environment.wal_device,\n 'max_connection_threads': self.parameters.max_connection_threads,\n 'incremental_metrics': convert_incremental_metrics_to_dict(self.metrics.incremental_metrics)\n }\n data.update(oltpbench_data)\n return data", "def to_json(self):\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.to_dict())", "def metrics_to_json(pipeline_result, fname):\n metric_results = pipeline_result.metrics().query()\n results = {}\n for counter in metric_results['counters']:\n counter_name = counter.key.step + ':' + counter.key.metric.name\n results[counter_name] = counter.result\n with tf.io.gfile.GFile(fname, 'w') as f:\n f.write(json.dumps(results, indent=4, sort_keys=True))", "def to_json(self):\n return json.dumps(self, default=lambda i: i.__dict__)", "def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json(self):\n\n keys = [\n \"job_id\",\n \"facility_id\",\n \"state\",\n \"exception\",\n \"traceback\",\n \"track_progress\",\n \"cancellable\",\n \"extra_metadata\",\n \"progress\",\n \"total_progress\",\n \"args\",\n \"kwargs\",\n \"func\",\n \"result\",\n \"long_running\",\n ]\n\n working_dictionary = {\n key: self.__dict__[key] for key in keys if key in self.__dict__\n }\n\n try:\n # Ensure a consistent and compact JSON representation across Python versions\n string_result = json.dumps(working_dictionary, separators=(\",\", \":\"))\n except TypeError as e:\n # A Job's arguments, results, or metadata are prime suspects for\n # what might cause this error.\n raise TypeError(\n \"Job objects need to be JSON-serializable: {}\".format(str(e))\n )\n return string_result", "def to_json(self):\n return [\"population\", self.species_index, self.card_trade_index]", "def as_json(self):", "def format(self, obj):\n 
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"" ]
[ "0.69310164", "0.6797208", "0.66209364", "0.63575345", "0.6177473", "0.6155753", "0.6147767", "0.6138568", "0.604386", "0.60371757", "0.6009655", "0.59683806", "0.5925637", "0.5886954", "0.588449", "0.5846016", "0.58390856", "0.5832954", "0.5819733", "0.58175325", "0.5811385", "0.578977", "0.5766914", "0.5713675", "0.5686334", "0.5684882", "0.56779695", "0.56732786", "0.5664908", "0.55537254", "0.5552076", "0.55505234", "0.5549962", "0.5536889", "0.5525156", "0.5514372", "0.5513935", "0.5505526", "0.54943824", "0.5491093", "0.54845005", "0.54734564", "0.54731864", "0.5467869", "0.5460164", "0.5458987", "0.54584706", "0.5453708", "0.5448364", "0.54412216", "0.5437602", "0.5433202", "0.5420111", "0.5418086", "0.54173887", "0.5410689", "0.5408246", "0.53956985", "0.53919744", "0.5391459", "0.5391294", "0.5390576", "0.5388211", "0.53769684", "0.5372702", "0.53725064", "0.53725064", "0.53725064", "0.5364713", "0.5360867", "0.53559965", "0.5355164", "0.5353685", "0.5350641", "0.535026", "0.53463215", "0.53394806", "0.5328759", "0.5313202", "0.53058", "0.5305463", "0.5305313", "0.5305313", "0.5304391", "0.5303011", "0.52989644", "0.5292945", "0.5289889", "0.5289889", "0.52894396", "0.5281606", "0.5281047", "0.5280408", "0.5279975", "0.5279545", "0.5262819", "0.526258", "0.526258", "0.526258", "0.526258" ]
0.5677763
27
A simple wrapper for scipy.optimize.minimize using JAX.
def minimize(fun, x0, method=None, args=(), bounds=None, constraints=(), tol=None, callback=None, options=None): # Use tree flatten and unflatten to convert params x0 from PyTrees to flat arrays x0_flat, unravel = ravel_pytree(x0) # Wrap the objective function to consume flat _original_ # numpy arrays and produce scalar outputs. def fun_wrapper(x_flat, *args): x = unravel(x_flat) loss_val = float(fun(x, *args)) return loss_val # Wrap the gradient in a similar manner jac = jit(grad(fun)) def jac_wrapper(x_flat, *args): x = unravel(x_flat) g_flat, _ = ravel_pytree(jac(x, *args)) og = onp.array(g_flat) return og # Wrap the callback to consume a pytree def callback_wrapper(x_flat, *args): if callback is not None: x = unravel(x_flat) return callback(x, *args) # Minimize with scipy results = scipy.optimize.minimize(fun_wrapper, x0_flat, args=args, method=method, jac=jac_wrapper, callback=callback_wrapper, bounds=bounds, constraints=constraints, tol=tol, options=options) # pack the output back into a PyTree results["x"] = unravel(results["x"]) return results
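A minimal, self-contained sketch of how a wrapper like the one above is driven, assuming jax and scipy are installed; the quadratic loss, the dict-shaped parameter pytree, and the L-BFGS-B method choice are illustrative assumptions, not part of the original snippet.

import numpy as onp
import scipy.optimize
import jax.numpy as jnp
from jax import grad, jit
from jax.flatten_util import ravel_pytree

def loss(params):
    # Simple convex objective over a dict-shaped pytree of parameters.
    return jnp.sum((params["w"] - 3.0) ** 2) + (params["b"] + 1.0) ** 2

params0 = {"w": jnp.zeros(3), "b": jnp.array(0.0)}
x0_flat, unravel = ravel_pytree(params0)   # flatten the pytree for SciPy
grad_fn = jit(grad(loss))                  # JAX gradient of the pytree loss

result = scipy.optimize.minimize(
    fun=lambda x: float(loss(unravel(x))),                            # scalar loss on flat params
    x0=onp.asarray(x0_flat),
    jac=lambda x: onp.asarray(ravel_pytree(grad_fn(unravel(x)))[0]),  # flat gradient
    method="L-BFGS-B",
)
params_opt = unravel(result.x)             # back to {"w": ..., "b": ...}
print(params_opt["w"], params_opt["b"])    # approximately [3, 3, 3] and -1.0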
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimize(fun: Callable[..., float],\n x0: np.ndarray,\n args: Tuple = (),\n method: Optional[str] = None,\n **kwargs) -> scipy.optimize.OptimizeResult:\n if method.lower() in OPTIMIZERS:\n optimizer = OPTIMIZERS[method.lower()]\n return optimizer(fun, x0, args=args, **kwargs)\n return scipy.optimize.minimize(fun, x0, args=args, method=method, **kwargs)", "def minimize(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'min',method,**kwargs)", "def minimize(\n func: Callable,\n x0: Union[Array, BlockArray],\n args: Union[Tuple, Tuple[Any]] = (),\n method: str = \"L-BFGS-B\",\n hess: Optional[Union[Callable, str]] = None,\n hessp: Optional[Callable] = None,\n bounds: Optional[Union[Sequence, spopt.Bounds]] = None,\n constraints: Union[spopt.LinearConstraint, spopt.NonlinearConstraint, dict] = (),\n tol: Optional[float] = None,\n callback: Optional[Callable] = None,\n options: Optional[dict] = None,\n) -> spopt.OptimizeResult:\n\n if snp.util.is_complex_dtype(x0.dtype):\n # scipy minimize function requires real-valued arrays, so\n # we split x0 into a vector with real/imaginary parts stacked\n # and compose `func` with a `_join_real_imag`\n iscomplex = True\n func_ = lambda x: func(_join_real_imag(x))\n x0 = _split_real_imag(x0)\n else:\n iscomplex = False\n func_ = func\n\n x0_shape = x0.shape\n x0_dtype = x0.dtype\n x0 = x0.ravel() # if x0 is a BlockArray it will become a jax array here\n\n # Run the SciPy minimizer\n if method in (\n \"CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, \"\n \"trust-exact, trust-constr\"\n ).split(\n \", \"\n ): # uses gradient info\n min_func = _wrap_func_and_grad(func_, x0_shape, x0_dtype)\n jac = True # see scipy.minimize docs\n else: # does not use gradient info\n min_func = _wrap_func(func_, x0_shape, x0_dtype)\n jac = False\n\n res = spopt.OptimizeResult({\"x\": None})\n\n def fun(x0):\n nonlocal res # To use the external res and update side effect\n res = spopt.minimize(\n min_func,\n x0=x0,\n args=args,\n jac=jac,\n method=method,\n options=options,\n ) # Returns OptimizeResult with x0 as ndarray\n return res.x.astype(x0_dtype)\n\n # HCB call with side effects to get the OptimizeResult on the same device it was called\n res.x = hcb.call(\n fun,\n arg=x0,\n result_shape=x0, # From Jax-docs: This can be an object that has .shape and .dtype attributes\n )\n\n # un-vectorize the output array from spopt.minimize\n res.x = snp.reshape(\n res.x, x0_shape\n ) # if x0 was originally a BlockArray then res.x is converted back to one here\n\n if iscomplex:\n res.x = _join_real_imag(res.x)\n\n return res", "def minimize(self):\n raise NotImplementedError", "def objective_function(x):\n return x * 1 # change this to our actual function", "def fake_minimize(fn):\n def side_effect(o, mx, **kwargs):\n return mock.MagicMock(x=mx)\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n with mock.patch.object(scipy.optimize, 'minimize', side_effect=side_effect):\n return fn(*args, **kwargs)\n\n return wrapper", "def scipy_lbfgs(fun, jac, x0):\n result = scipy.optimize.minimize(fun, x0, jac=jac, method='L-BFGS-B')\n if not result['success']:\n raise RuntimeError(\"L-BFGS-B failed to converge\")\n return result['x']", "def minimize(self,x0=None):\n import time\n start_time = time.time()\n tmp,total_par,lik_grad = self.minimize_both_vers(numerical=False,x0=x0)\n if tmp['success']==False:\n print(\"Probably a problem with gradient, do numerical\")\n tmp,total_par,lik_grad = self.minimize_both_vers(x0=tmp['x'],numerical=True)\n 
print(\"--- %s seconds ---\" % (time.time() - start_time))\n self.lengthscale = total_par[0]\n self.variance = total_par[1]\n self.gstds = total_par[2]\n tmp['fx']=np.array([total_par[0],total_par[1],total_par[2]])\n return tmp,total_par,lik_grad", "def minimize(A, t, y0, function):\n return y0 - function(A, t)", "def test_j1():\n import time\n t1 = time.time()\n\n x_list = [ 0, 1.01, 0.2, 3.3, 5.9, 77. ]\n vals1 = [ galsim.bessel.j1(x) for x in x_list ]\n print 'x = ',x_list\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = [ scipy.special.j1(x) for x in x_list ]\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j1 disagrees with scipy.special.j1\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of j1.'\n\n # These values are what scipy returns. Check against these, so not require scipy.\n vals2 = [ 0.0,\n 0.4432857612090717,\n 0.099500832639236036,\n 0.22066345298524112,\n -0.29514244472901613,\n 0.066560642470571682\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j1 disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "def get_scipy_minimizer(**kwargs):\n def minimizer(objective, n_params):\n params = [random.random() for _ in range(n_params)]\n result = scipy_minimizer(objective, params, **kwargs)\n return result.x\n\n return minimizer", "def optimize(self, x0):\n (result,f,d) = fmin_l_bfgs_b(lambda x:self.costFun(x), np.ravel(x0),lambda x: self.gradFun(x))\n print(\"optimization completed with cost: \" + str(f))\n return result.reshape(self.inp_shape)", "def objective(self, x):\n pass", "def objective(self, x):\n pass", "def _optimize(self,x0,type,method,**kwargs):\n from scipy.optimize import fmin,fmin_powell\n\n if type == 'min':\n g=lambda x:self(x)\n elif type == 'max':\n g=lambda xs:-1*self(x)\n elif type == 'root':\n g=lambda x:np.abs(self(x))\n elif type == 'val':\n val = kwargs.pop('valtofind')\n g=lambda x:np.abs(self(x)-val)\n elif type == 'saddle':\n raise NotImplementedError\n else:\n raise ValueError('Unrecognized optimization type')\n\n if method == 'fmin':\n res = fmin(g,x0,**kwargs)\n elif method == 'fmin_powell':\n res = fmin_powell(g,x0,**kwargs)\n else:\n raise ValueError('Unrecognized method')\n\n self.lastOpt = res\n return res[0]", "def test_j0():\n import time\n t1 = time.time()\n\n x_list = [ 0, 1.01, 0.2, 3.3, 5.9, 77. ]\n vals1 = [ galsim.bessel.j0(x) for x in x_list ]\n print 'x = ',x_list\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = [ scipy.special.j0(x) for x in x_list ]\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j0 disagrees with scipy.special.j0\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of j0.'\n\n # These values are what scipy returns. 
Check against these, so not require scipy.\n vals2 = [ 1.0, \n 0.76078097763218844,\n 0.99002497223957631,\n -0.34429626039888467,\n 0.12203335459282282,\n 0.062379777089647245\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j0 disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "def minimize(self):\n pass", "def Example_2():\r\n print \"\\n** Example_2: Finding the minimum of the Rosenbrock function with 2 variables under constraints **\"\r\n\r\n Ex = optim_wrapper()\r\n X0 = np.zeros(2)\r\n lim = [(-2.0, 2.0)]*2\r\n Ex.set_X0(X0)\r\n Ex.set_lim(lim)\r\n Ex.set_penalties_func(pen)\r\n Ex.set_norm_count(200)\r\n Ex.set_nb_best(100)\r\n Ex.set_obj_func(obj)\r\n Ex.set_wrapper()\r\n Ex.launch_multi_opti()\r\n print Ex\r\n\r\n X_solution = [1.0, 1.0]\r\n res_string = \"Results of the optimisation: {:03.4f}, expected results: {:03.4f}\".format(obj(Ex.get_res()), obj(X_solution))\r\n print res_string\r\n print \"*\" * len(res_string)", "def main():\n parser = argparse.ArgumentParser(usage=__doc__)\n parser.add_argument(\"--order\", type=int, default=3, help=\"order of Bessel function\")\n args = parser.parse_args()\n f = lambda x: -special.jv(args.order, x)\n sol = optimize.minimize(f, 1.0)\n x = np.linspace(0, 10, 5000)\n plt.plot(x, special.jv(args.order, x), '-', sol.x, -sol.fun, 'o')\n plt.show() # Displays the image in matplotlib window", "def Example_1(nb_param):\r\n print \"\\n** Example_1: Finding the minimum of the Rosenbrock function with {0} variables **\".format(nb_param)\r\n\r\n Ex = optim_wrapper()\r\n X0 = np.zeros(nb_param)\r\n lim = [(-2.0,2.0)]*nb_param\r\n Ex.set_X0(X0)\r\n Ex.set_lim(lim)\r\n Ex.set_norm_count(nb_param**2*2)\r\n Ex.set_nb_best(nb_param**2)\r\n Ex.set_obj_func(obj)\r\n Ex.set_multi_proc(1)\r\n Ex.set_wrapper()\r\n Ex.test_test()\r\n print Ex\r\n\r\n X_solution = [1.0]*nb_param\r\n res_string = \"Results of the optimisation: {:03.4f}, expected results: {:03.4f}\".format(obj(Ex.get_res()),obj(X_solution))\r\n print res_string\r\n print \"*\"*len(res_string)", "def J(m, x):\n N_slices = 1000 # question asks for a function for which N is not a var\n def temp_integrand(t): return integrand(order=m, theta=t, x_eval=x)\n Jmx = fi.simps_int(0., np.pi, N_slices, temp_integrand)\n return Jmx", "def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)", "def optimize5():\n xl = xl_app()\n qt_app = get_qt_app() # pragma noqc\n # Get the initial values of the input cells\n msgBox = OpDialog()\n result = msgBox.exec_()\n if not result: # user cancelled\n return\n\n in_range = get_range(msgBox.in_range.text())\n out_cell = get_range(msgBox.out_cell.text())\n in_values = list(in_range.Value)\n X = np.array([x[0] for x in in_values])\n\n orig_calc_mode = xl.Calculation\n try:\n # switch Excel to manual calculation\n # and disable screen updating\n xl.Calculation = constants.xlManual\n xl.ScreenUpdating = False\n\n # run the minimization routine\n xl_obj_func = partial(obj_func, xl, in_range, out_cell)\n print(f\"X = {X}\")\n result = minimize(xl_obj_func, X, method=\"nelder-mead\")\n 
in_range.Value = [(float(x),) for x in result.x]\n xl.ScreenUpdating = True\n mbox = QMessageBox()\n mbox.setIcon(QMessageBox.Information)\n mbox.setText(\"Optimization results shown below.\" \"\\nMake changes permanent?\")\n mbox.setWindowTitle(\"Optimization Complete\")\n mbox.setInformativeText(\n \"\\n\".join(\n [\n \"Successful: %s\" % result.success,\n result.message,\n \"After %d iterations\" % result.nit,\n ]\n )\n )\n mbox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n yes_no = mbox.exec_()\n if yes_no != QMessageBox.Ok:\n in_range.Value = in_values\n else:\n in_range.Value = [(float(x),) for x in result.x]\n\n finally:\n # restore the original calculation\n # and screen updating mode\n xl.ScreenUpdating = True\n xl.Calculation = orig_calc_mode", "def Optimize(self):\n return _gmat_py.ExternalOptimizer_Optimize(self)", "def min_scalar(objective, **kwargs):\n result = minimize_scalar(objective, **kwargs)\n return result.fun", "def _approx_jacobian(func, xbar, epsilons):\n\n n = xbar.shape[0]\n ybar = func(xbar)\n m = ybar.shape[0]\n\n J = np.zeros((m, n))\n \n for i in range(n):\n # Forward evaluation\n xf = np.copy(xbar)\n xf[i] = xbar[i] + epsilons[i]\n yf = func(xf)\n\n # Backward evaluation\n xb = np.copy(xbar)\n xb[i] = xbar[i] - epsilons[i]\n yb = func(xb)\n \n # Slope\n delta = yf - yb\n\n J[:, i] = delta / (2.0 * epsilons[i])\n\n return J", "def gmres_wrapper(jax: types.ModuleType):\n jnp = jax.numpy\n\n def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n \"\"\"\n Solve A x = b for x using the m-restarted GMRES method. This is\n intended to be called via jax_backend.gmres.\n\n Given a linear mapping with (n x n) matrix representation\n A = A_mv(*A_args) gmres_m solves\n Ax = b (1)\n where x and b are length-n vectors, using the method of\n Generalized Minimum RESiduals with M iterations per restart (GMRES_M).\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x0: Initial guess solution.\n tol, atol: Solution tolerance to achieve,\n norm(residual) <= max(tol * norm(b), atol).\n tol is also used to set the threshold at which the Arnoldi factorization\n terminates.\n num_krylov_vectors: Size of the Krylov space to build at each restart.\n maxiter: The Krylov space will be repeatedly rebuilt up to this many\n times.\n Returns:\n x: The approximate solution.\n beta: Norm of the residual at termination.\n n_iter: Number of iterations at termination.\n converged: Whether the desired tolerance was achieved.\n \"\"\"\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done\n\n def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n \"\"\"\n A single restart of GMRES.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The `b` in `A @ x = b`.\n x: Initial guess solution.\n tol: Solution tolerance to achieve,\n num_krylov_vectors : Size of the 
Krylov space to build.\n Returns:\n done: Whether convergence was achieved.\n beta: Magnitude of residual (i.e. the error estimate).\n x: The approximate solution.\n \"\"\"\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x\n\n @jax.jit\n def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n \"\"\"\n Computes the residual vector r and its norm, beta, which is minimized by\n GMRES.\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and\n v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x: Initial guess solution.\n Returns:\n r: The residual vector.\n beta: Its magnitude.\n \"\"\"\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta\n\n def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n \"\"\"\n Updates the solution in response to the information computed by the\n main GMRES loop.\n\n Args:\n k: The final iteration which was reached by GMRES before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: The R factor in H = QR where H is the Arnoldi overlap matrix.\n beta_vec: Stores the Givens factors used to map H into QR.\n x0: The initial guess solution.\n Returns:\n x: The updated solution.\n \"\"\"\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Builds the Arnoldi decomposition of (A, v), where v is the normalized\n residual of the current solution estimate. The decomposition is\n returned as V, R, where V is the usual matrix of Krylov vectors and\n R is the upper triangular matrix in H = QR, with H the usual matrix\n of overlaps.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n n_kry: Size of the Krylov space to build; this is called\n num_krylov_vectors in higher level code.\n x0: Guess solution.\n r: Residual vector.\n beta: Magnitude of r.\n tol: Solution tolerance to achieve.\n b_norm: Magnitude of b in Ax = b.\n Returns:\n k: Counts the number of iterations before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: From H = QR where H is the Arnoldi matrix of overlaps.\n beta_vec: Stores Q implicitly as Givens factors.\n \"\"\"\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. 
Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)\n\n VarType = Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray,\n float, jax.ShapedArray]\n ConstType = Tuple[float, Callable, Sequence, jax.ShapedArray, int]\n GmresCarryType = Tuple[VarType, ConstType]\n\n @jax.jit\n def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:\n \"\"\"\n This function dictates whether the main GMRES while loop will proceed.\n It is equivalent to:\n if k < n_kry and err > tol:\n return True\n else:\n return False\n where k, n_kry, err, and tol are unpacked from gmres_carry.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n (bool): Whether to continue iterating.\n \"\"\"\n gmres_constants, gmres_variables = gmres_carry\n tol = gmres_constants[0]\n k = gmres_variables[0]\n err = gmres_variables[4]\n n_kry = gmres_constants[4]\n\n def is_iterating(k, n_kry):\n return k < n_kry\n\n def not_converged(args):\n err, tol = args\n return err >= tol\n return jax.lax.cond(is_iterating(k, n_kry), # Predicate.\n not_converged, # Called if True.\n lambda x: False, # Called if False.\n (err, tol)) # Arguments to calls.\n\n @jax.jit\n def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n \"\"\"\n Performs a single iteration of gmres_krylov. 
See that function for a more\n detailed description.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n gmres_carry: The updated gmres_carry.\n \"\"\"\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)\n\n @jax.jit\n def _gs_step(r: jax.ShapedArray,\n v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs one iteration of the stabilized Gram-Schmidt procedure, with\n r to be orthonormalized against {v} = {v_0, v_1, ...}.\n\n Args:\n r: The new vector which is not in the initially orthonormal set.\n v_i: The i'th vector in that set.\n Returns:\n r_i: The updated r which is now orthonormal with v_i.\n h_i: The overlap of r with v_i.\n \"\"\"\n h_i = jnp.vdot(v_i, r)\n r_i = r - h_i * v_i\n return r_i, h_i\n\n @jax.jit\n def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs the kth iteration of the Arnoldi reduction procedure.\n Args:\n k: The current iteration.\n A_mv, A_args: A function A_mv(v, *A_args) performing a linear\n transformation on v.\n V: A matrix of size (n, K + 1), K > k such that each column in\n V[n, :k+1] stores a Krylov vector and V[:, k+1] is all zeroes.\n H: A matrix of size (K, K), K > k with H[:, k] all zeroes.\n Returns:\n V, H: With their k'th columns respectively filled in by a new\n orthogonalized Krylov vector and new overlaps.\n \"\"\"\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n r_new = v_new / v_norm\n # Normalize v unless it is the zero vector.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H\n\n####################################################################\n# GIVENS ROTATIONS\n####################################################################\n @jax.jit\n def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n \"\"\"\n Successively applies each of the rotations stored in givens to H_col.\n\n Args:\n H_col : The vector to be rotated.\n givens: 2 x K, K > k matrix of rotation factors.\n k : Iteration number.\n Returns:\n H_col : The rotated vector.\n \"\"\"\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, 
jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col\n\n @jax.jit\n def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Applies the Givens rotations stored in the vectors cs and sn to the vector\n H_col. Then constructs a new Givens rotation that eliminates H_col's\n k'th element, yielding the corresponding column of the R in H's QR\n decomposition. Returns the new column of R along with the new Givens\n factors.\n\n Args:\n H_col : The column of H to be rotated.\n givens: A matrix representing the cosine and sine factors of the\n previous GMRES Givens rotations, in that order\n (i.e. givens[0, :] -> the cos factor).\n k : Iteration number.\n Returns:\n R_col : The column of R obtained by transforming H_col.\n givens_k: The new elements of givens that zeroed out the k+1'th element\n of H_col.\n \"\"\"\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens\n\n @jax.jit\n def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n \"\"\"\n Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta)\n so that [cs -sn] @ [v1] = [r]\n [sn cs] [v2] [0]\n Args:\n v1, v2: The scalars.\n Returns:\n cs, sn: The rotation factors.\n \"\"\"\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn\n\n fnames = [\n \"gmres_m\", \"gmres_residual\", \"gmres_krylov\", \"gs_step\",\n \"kth_arnoldi_step\", \"givens_rotation\"\n ]\n functions = [\n gmres_m, gmres_residual, gmres_krylov, _gs_step, kth_arnoldi_step,\n givens_rotation\n ]\n\n class Functions:\n\n def __init__(self, fun_dict):\n self.dict = fun_dict\n\n def __getattr__(self, name):\n return self.dict[name]\n\n return Functions(dict(zip(fnames, functions)))", "def jacobian(self, x):\n pass", "def Optimization(*args, **kwargs):\n from warnings import warn\n\n warn(\n \"Optimization has been renamed to OptimizationResult and will be removed as soon as v0.13.0\", DeprecationWarning\n )\n return OptimizationResult(*args, **kwargs)", "def minimize(self, cost_function, initial_params):\n\n # Optimization Results Object\n history = []\n\n def wrapped_cost_function(params):\n value = cost_function.evaluate(params)\n history.append(cost_function.evaluations_history[-1])\n print(f'Function evaluation {len(history)}: {value}', flush=True)\n print(f'{params}', flush=True)\n return value\n\n strategy = cma.CMAEvolutionStrategy(initial_params, self.sigma_0, self.options)\n result = strategy.optimize(wrapped_cost_function).result\n\n optimization_results = {}\n optimization_results['opt_value'] = result.fbest\n optimization_results['opt_params'] = result.xbest\n optimization_results['history'] = history\n optimization_results['nfev'] = result.evaluations\n optimization_results['nit'] = result.iterations\n optimization_results['cma_xfavorite'] = list(result.xfavorite)\n\n return OptimizeResult(optimization_results)", "def 
EvaluateJacobian(x):\n j = np.zeros((NOBSERVATIONS, 3))\n\n for i in range(NOBSERVATIONS):\n base = np.exp(-x[0] * t[i]) / (x[1] + x[2] * t[i])\n\n j[i][0] = t[i] * base\n j[i][1] = base / (x[1] + x[2] * t[i])\n j[i][2] = base * t[i] / (x[1] + x[2] * t[i])\n\n return j", "def _generic_minimize(method, loss, x0,\n verbose=False,\n num_iters=1000,\n tol=1e-4,\n state=None,\n full_output=False,\n suppress_warnings=False,\n **kwargs):\n # Flatten the loss\n _x0, unflatten = flatten(x0)\n _objective = lambda x_flat, itr: loss(unflatten(x_flat), itr)\n\n if verbose:\n print(\"Fitting with {}.\".format(method))\n\n # Specify callback for fitting\n itr = [0]\n def callback(x_flat):\n itr[0] += 1\n print(\"Iteration {} loss: {:.3f}\".format(itr[0], loss(unflatten(x_flat), -1)))\n\n # Wrap the gradient to avoid NaNs\n def safe_grad(x, itr):\n g = grad(_objective)(x, itr)\n g[~np.isfinite(g)] = 1e8\n return g\n\n # Call the optimizer. Pass in -1 as the iteration since it is unused.\n result = minimize(_objective, _x0, args=(-1,),\n jac=safe_grad,\n method=method,\n callback=callback if verbose else None,\n options=dict(maxiter=num_iters, disp=verbose),\n tol=tol,\n **kwargs)\n if verbose:\n print(\"{} completed with message: \\n{}\".format(method, result.message))\n\n if not suppress_warnings and not result.success:\n warn(\"{} failed with message:\\n{}\".format(method, result.message))\n\n if full_output:\n return unflatten(result.x), result\n else:\n return unflatten(result.x)", "def example():\n Optimizer = BFGS(f, g)\n startPoint = 100 * numpy.ones(2);\n res = Optimizer.optimize(startPoint,\n epsilon=1e-5,\n maxIterations=10)\n print res\n pass", "def function_to_minimize(x):\n return math.sin(x[0]) * math.cos(x[1]) + math.cos(x[0] + x[1]) + random.uniform(-0.02, 0.02)", "def optimize(self, x0, n_iter = 50):\n x0 = np.asarray(x0, dtype=np.float32)\n opt = proximal_alg.ProximalGradSolver(self.gamma, self.alpha, lambda x: self.costFun(x,self.input), lambda x: np.sum(np.abs(x)), lambda x: self.gradFun(x, self.input), proximal_alg.prox_l1_01)\n result = opt.minimize(x0, n_iter = n_iter)\n return result", "def optimizer(grad, method, init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N):\r\n\r\n\t\r\n\tif grad == 'NO':\r\n\t\tif method == 'Powell' :\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'Nelder-Mead':\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t options = {'ftol': 0.0001})\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'default':\r\n\t\t\tres = opt.minimize(Ulike,init_par, \r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\r\n\telif grad == 'YES':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, \r\n \t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t options={'disp': True, 'maxiter': 4000, 'xtol': 1e-4})\r\n\t\treturn res.x, res.nit \r\n\t\t\t\r\n\t\t\r\n\telif grad == 'HESS':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, hess = stella_hessian,\r\n\t\t\t\t\t args = (alpha, delta, plx_obs, 
mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t\t options = {'disp': True, 'maxiter': 4000, 'xtol': 1.e-06}) \r\n\t\treturn res.x, res.nit", "def _partial_optimize(\n self,\n optimize_nodes,\n evaluate_nodes,\n fall_to_simplex=True,\n minimizer=\"Powell\",\n use_basin=False,\n debug=False,\n minimizer_kwargs=None,\n basin_kwargs=None,\n ):\n if minimizer_kwargs is None:\n minimizer_kwargs = {}\n if basin_kwargs is None:\n basin_kwargs = {}\n\n non_observeds = [x for x in optimize_nodes if not x.observed]\n\n init_vals = [node.value for node in non_observeds]\n\n # define function to be optimized\n def opt(values):\n if debug:\n print(values)\n for value, node in zip(values, optimize_nodes):\n node.set_value(value)\n try:\n logp_optimize = [node.logp for node in optimize_nodes]\n logp_evaluate = [node.logp for node in evaluate_nodes]\n neglogp = -np.sum(logp_optimize) - np.sum(logp_evaluate)\n if debug:\n print(neglogp)\n return neglogp\n except pm.ZeroProbability:\n if debug:\n print(\"Outside support!\")\n return np.inf\n\n # optimize\n if use_basin:\n try:\n minimizer_kwargs_passed = {\n \"method\": minimizer,\n \"options\": minimizer_kwargs,\n }\n basinhopping(\n opt,\n init_vals,\n minimizer_kwargs=minimizer_kwargs_passed,\n **basin_kwargs\n )\n except:\n if fall_to_simplex:\n print(\n \"Warning: Powell optimization failed. Falling back to simplex.\"\n )\n minimizer_kwargs_passed = {\n \"method\": minimizer,\n \"options\": minimizer_kwargs,\n }\n basinhopping(\n opt,\n init_vals,\n minimizer_kwargs=minimizer_kwargs_passed,\n **basin_kwargs\n )\n else:\n raise\n else:\n try:\n minimize(opt, init_vals, method=minimizer, options=minimizer_kwargs)\n except:\n if fall_to_simplex:\n print(\n \"Warning: Powell optimization failed. 
Falling back to simplex.\"\n )\n minimize(\n opt, init_vals, method=\"Nelder-Mead\", options=minimizer_kwargs\n )\n else:\n raise", "def minimize(self):\n self.abstract_obj.minimize()", "def evaluate_jacobian(self, x, V):\n jac = self._numeric_jacobian(x, V, *self.model.params.values())\n return jac", "def minimize(self, func, grad, x0, args=()):\n learning_rate = self._learning_rate\n best_x = x = x0\n best_value = func(x, *args)\n iters_without_improve = 0\n\n for iteration in range(self._max_iterations):\n gradient = grad(x, *args)\n\n # If absolute values of all partial derivatives are equal to 0 with specified accuracy, then parameters are\n # close enough to the minimum and there is no need to continue gradient descent.\n if np.abs(gradient).max() <= self._accuracy:\n break\n\n x = x - learning_rate * gradient\n\n # If new values of x haven't lead to decrease of the function value for the specified number of iteration,\n # the x is reverted to its previous best value and the learning rate is reduced\n value = func(x, *args)\n if value > best_value:\n iters_without_improve += 1\n if iters_without_improve >= self._lr_reduce_patience:\n x = best_x\n learning_rate *= self._lr_reduce_factor\n else:\n iters_without_improve = 0\n best_value = value\n best_x = x\n\n return best_x", "def logtrick_minimizer(minimizer):\n @wraps(minimizer)\n def new_minimizer(fun, x0, jac=True, bounds=None, **minimizer_kwargs):\n\n if bounds is None:\n return minimizer(fun, x0, jac=jac, bounds=bounds,\n **minimizer_kwargs)\n\n logx, expx, gradx, bounds = logtrick_gen(bounds)\n\n # Intercept gradient\n if callable(jac):\n def new_jac(x, *fargs, **fkwargs):\n return gradx(jac(expx(x), *fargs, **fkwargs), x)\n else:\n new_jac = jac\n\n # Intercept objective\n if (not callable(jac)) and bool(jac):\n def new_fun(x, *fargs, **fkwargs):\n o, g = fun(expx(x), *fargs, **fkwargs)\n return o, gradx(g, x)\n else:\n def new_fun(x, *fargs, **fkwargs):\n return fun(expx(x), *fargs, **fkwargs)\n\n # Transform the final result\n result = minimizer(new_fun, logx(x0), jac=new_jac, bounds=bounds,\n **minimizer_kwargs)\n result['x'] = expx(result['x'])\n return result\n\n return new_minimizer", "def numerical_jacobian (fhandle, x, **args):\n \n y = fhandle (x, **args)\n numRows, numCols = (len (y), len (x))\n J = np.zeros ((numRows, numCols))\n\n for col in range (0, numCols):\n xPrime = x.copy ()\n deltaX = max (1e-4*x[col], 1e-6)\n xPrime[col] += deltaX\n yPrime = fhandle (xPrime, **args)\n J[:, col] = (yPrime - y) / deltaX\n\n return J", "def optimization_manager(config):\n def optimize(state,\n grad,\n warmup=config.optim.warmup,\n grad_clip=config.optim.grad_clip):\n \"\"\"Optimizes with warmup and gradient clipping (disabled if negative).\"\"\"\n lr = state.lr\n if warmup > 0:\n lr = lr * jnp.minimum(state.step / warmup, 1.0)\n if grad_clip >= 0:\n # Compute global gradient norm\n grad_norm = jnp.sqrt(\n sum([jnp.sum(jnp.square(x)) for x in jax.tree_leaves(grad)]))\n # Clip gradient\n clipped_grad = jax.tree_map(\n lambda x: x * grad_clip / jnp.maximum(grad_norm, grad_clip), grad)\n else: # disabling gradient clipping if grad_clip < 0\n clipped_grad = grad\n return state.optimizer.apply_gradient(clipped_grad, learning_rate=lr)\n\n return optimize", "def func(x):\n return jnp.sum(jnp.power(jnp.sin(x), 2))", "def J(self, name, q, x=None):\n\n x = self.x_zeros if x is None else x\n funcname = name + '[0,0,0]' if np.allclose(x, 0) else name\n # check for function in dictionary\n if self._J.get(funcname, None) is None:\n 
self._J[funcname] = self._calc_J(name=name, x=x)\n parameters = tuple(q) + tuple(x)\n return np.array(self._J[funcname](*parameters), dtype='float32')", "def test_jax(self, approx_order, strategy, tol):\r\n jax = pytest.importorskip(\"jax\")\r\n from jax import numpy as jnp\r\n from pennylane.interfaces.jax import JAXInterface\r\n from jax.config import config\r\n\r\n config.update(\"jax_enable_x64\", True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.543, -0.654])\r\n\r\n def cost_fn(x):\r\n with JAXInterface.apply(qml.tape.QubitParamShiftTape()) as tape:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tape.trainable_params = {0, 1}\r\n tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)\r\n jac = fn([t.execute(dev) for t in tapes])\r\n return jac\r\n\r\n res = jax.jacobian(cost_fn)(params)\r\n x, y = params\r\n expected = np.array(\r\n [\r\n [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],\r\n [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def vjp(func, x, backend='autograd'):\n if backend == 'autograd':\n return ag.make_vjp(func, x)\n elif backend == 'pytorch':\n raise NotImplementedError('VJP for Pytorch backend is not implemented yet.')", "def logexp_optimise(fn, x, **kwargs):\n def transform(x):\n theta = logexp_to_natural(x)\n y,grad = fn(theta)\n # get gradients back to our space\n grad *= logexp_gradientfactor(theta)\n return (y,grad)\n res = spo.minimize(transform, natural_to_logexp(x), jac=True,\n **kwargs)\n res.x = logexp_to_natural(res.x)\n return res", "def minimize_scalar(\n func: Callable,\n bracket: Optional[Union[Sequence[float]]] = None,\n bounds: Optional[Sequence[float]] = None,\n args: Union[Tuple, Tuple[Any]] = (),\n method: str = \"brent\",\n tol: Optional[float] = None,\n options: Optional[dict] = None,\n) -> spopt.OptimizeResult:\n\n def f(x, *args):\n # Wrap jax-based function `func` to return a numpy float rather\n # than a jax array of size (1,)\n return func(x, *args).item()\n\n res = spopt.minimize_scalar(\n fun=f,\n bracket=bracket,\n bounds=bounds,\n args=args,\n method=method,\n tol=tol,\n options=options,\n )\n return res", "def arg_min_scalar(objective, **kwargs):\n return minimize_scalar(objective, **kwargs).x", "def J_infomax(alpha=.5):\n def J(joint, _, i=None, verbose=False):\n marginal = joint.sum(axis=G_axis, keepdim=True)\n conditional = joint / marginal # p(g|x) = p(g, x) / p(x)\n H_G_given_X = -(joint * conditional.log()).sum()\n H_X = -(marginal * marginal.log()).sum()\n loss = H_G_given_X + alpha*H_X\n if verbose:\n H_X_given_G = -(joint * (joint / joint.sum(X_axis, keepdim=True)).log()).sum()\n print(\"epoch =\", i,\n \"H[G|X] =\", H_G_given_X.item(),\n \"H[X] =\", H_X.item(),\n \"loss =\", loss.item())\n return loss\n return J", "def calc_jacobian(*args, **kwargs):\n try:\n tag = kwargs[\"tag\"]\n except:\n tag = 0\n\n try:\n sparse = kwargs[\"sparse\"]\n except:\n sparse = True\n\n if sparse:\n try:\n shape = kwargs[\"shape\"]\n except:\n raise ValueError(\"'shape' should be passed to calculate sparse jacobian!\")\n\n \n options = np.array([0,0,0,0],dtype=int)\n result = ad.colpack.sparse_jac_no_repeat(tag, *args, options=options)\n nnz = result[0]\n ridx = result[1]\n cidx = result[2]\n values = result[3]\n assert nnz > 0\n jac = sp.csr_matrix((values, (ridx, cidx)), shape=shape)\n jac = jac.toarray()\n else:\n jac 
= ad.jacobian(tag, *args)\n return jac", "def softmax_jacobian_analytic(x, dim):\n y = F.softmax(x, dim)\n y[y != y] = 0 # replace nan-s with zeros\n J = torch.zeros((x.shape[dim],) + tuple(x.shape), dtype=x.dtype, device=x.device)\n si = [slice(None)] * len(y.shape)\n sj = [slice(None)] * len(y.shape)\n s = [slice(None)] * len(J.shape)\n for i in range(y.shape[dim]):\n si[dim] = i\n s[dim + 1] = i\n yi = y[tuple(si)]\n for j in range(y.shape[dim]):\n sj[dim] = j\n s[0] = j\n if i == j:\n J[tuple(s)] = yi * (1 - yi)\n else:\n yj = y[tuple(sj)]\n J[tuple(s)] = - yi * yj\n sj[dim] = slice(None)\n si[dim] = slice(None)\n s[dim + 1] = slice(None)\n return J", "def minimize_pygmo_np(func, x0, bounds, origin, algo_name, algo_options, gradient=None):\n if origin == \"pygmo\" and algo_name != \"simulated_annealing\":\n assert (\n \"popsize\" in algo_options\n ), f\"For genetic optimizers like {algo_name}, popsize is mandatory.\"\n assert (\n \"gen\" in algo_options\n ), f\"For genetic optimizers like {algo_name}, gen is mandatory.\"\n\n prob = _create_problem(func, bounds, origin, gradient)\n algo = _create_algorithm(algo_name, algo_options, origin)\n pop = _create_population(prob, algo_options, x0)\n evolved = algo.evolve(pop)\n result = _process_pygmo_results(evolved)\n\n return result", "def _optimize_f(self,x0,type,method,**kwargs):\n from scipy.optimize import fmin,fmin_powell\n\n if type == 'min':\n g=lambda *args,**kwargs:self.f(*args,**kwargs)\n elif type == 'max':\n g=lambda *args,**kwargs:-1*self.f(*args,**kwargs)\n elif type == 'root':\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs))\n elif type == 'val':\n val = kwargs.pop('valtofind')\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs)-val)\n elif type == 'saddle':\n raise NotImplementedError\n else:\n raise ValueError('Unrecognized optimization type')\n\n if method == 'fmin':\n res = fmin(g,x0,tuple(self.parvals),**kwargs)\n elif method == 'fmin_powell':\n res = fmin_powell(g,x0,tuple(self.parvals),**kwargs)\n else:\n raise ValueError('Unrecognized method')\n\n self.lastOpt = res\n return res[0]", "def intern_J(self):\n if self.Fz is None:\n fz_none = True\n else:\n fx, fy, fu = self.Fz\n fz_none = False\n if self.A is None:\n def J(x,y):\n if self.hx is None or self.gradh is None:\n if fz_none:\n fx, _, _ = self.F(x,y)\n xp, _, _ = minus(x, fx)\n xp, _, _ = operator_P(self.proj, xp)\n xp, _, _ = minus(x, xp)\n return LA.norm(xp),None,None\n else:\n if fz_none:\n fx, fy, _ = self.F(x,y)\n xp, yp, _ = minus(x, fx, y, fy)\n xp, yp, _ = operator_P(self.proj, xp, yp)\n xp, yp, _ = minus(x, xp, y, yp)\n total = np.concatenate((xp, yp))\n return LA.norm(xp)+LA.norm(yp),None,None\n else:\n def J(x,y,u):\n if self.hx is None or self.gradh is None:\n if fz_none:\n fx, _,fu = self.F(x,y,u)\n xp, up, _ = minus(x, fx, u, fu)\n xp, _, up = operator_P(self.proj, xp, None, up)\n xp, up, _ = minus(x, xp, u, up)\n total = np.concatenate((xp, up))\n return LA.norm(xp)+LA.norm(up),None,None\n else:\n if fz_none:\n fx, fy, fu = self.F(x,y,u)\n xp, yp, up = minus(x, fx, y, fy, u, fu)\n xp, yp, up = operator_P(self.proj, xp, yp, up)\n xp, yp, up = minus(x, xp, y, yp, u, up)\n total = np.concatenate((xp, yp, up))\n return LA.norm(xp)+LA.norm(yp)+LA.norm(up),None,None\n return J", "def costFun(self, S, x):", "def _solve(self) -> CasADiArrayType:\n self._solution = minimize(**self.minimize_input)\n return self._solution.x", "def gradFun(self, S, x):", "def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ 
np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J", "def minimizer(f, x, optimizer, grad_f, hess_f=None,\n args=(),\n maxiter=None, tol=1e-5,\n stepsize=1, adaptive=True,\n bounds=None,\n disp=False):\n min_obj = {'steepest': SteepestDescent,\n 'conjugate': ConjugateDescent,\n 'newton': NewtonDescent,\n 'cg': ScipyCG,\n 'ncg': ScipyNCG,\n 'bfgs': ScipyBFGS,\n 'lbfgs': ScipyLBFGS}\n\n if optimizer not in min_obj.keys():\n raise ValueError('unknown optimizer')\n local_meth = optimizer in ('steepest', 'conjugate', 'newton')\n\n if local_meth:\n proj = None\n if not bounds is None:\n if callable(bounds):\n proj = bounds\n else:\n proj = bounds_to_proj(bounds)\n\n return min_obj[optimizer](f, x, grad_f, hess_f=hess_f,\n maxiter=maxiter, tol=tol,\n stepsize=stepsize, adaptive=adaptive,\n proj=proj)\n\n if not bounds is None and optimizer != 'lbfgs':\n raise NotImplementedError('%s optimization method does not accept constraints' % optimizer)\n \n return min_obj[optimizer](f, x, grad_f, hess_f=hess_f,\n maxiter=maxiter, tol=tol,\n bounds=bounds, disp=disp)", "def test_jacobian_options(self, mocker):\n spy = mocker.spy(qml.gradients, \"param_shift\")\n\n a = jax.numpy.array([0.1, 0.2])\n\n dev = qml.device(\"default.qubit\", wires=1)\n\n def cost(a, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n device,\n gradient_fn=param_shift,\n gradient_kwargs={\"shifts\": [(np.pi / 4,)] * 2},\n )[0]\n\n jax.grad(cost)(a, device=dev)\n\n for args in spy.call_args_list:\n assert args[1][\"shifts\"] == [(np.pi / 4,)] * 2", "def add_straight_through_estimator(jax_function):\n\n # See\n # https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html\n def ste(primals, tangents):\n return jax_function(primals[0]), tangents[0]\n\n jax_function.defjvp(ste)", "def Jimpl(graph: Graph, resources, node):\n return _grad(graph)", "def test_scalar_jacobian(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n dev = qml.device(\"default.qubit\", wires=2)\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute([tape], dev, **execute_kwargs)[0]\n\n res = jax.jit(jax.grad(cost))(a)\n assert res.shape == ()\n\n # compare to standard tape jacobian\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n tape.trainable_params = [0]\n tapes, fn = param_shift(tape)\n expected = fn(dev.batch_execute(tapes))\n\n assert expected.shape == ()\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def softmax_jacobian_autograd(x, dim, log=False):\n import itertools\n\n if x.is_sparse:\n x = x.coalesce()\n\n dtype = x.dtype\n device = x.device\n shape = tuple(x.shape)\n J = torch.zeros((shape[dim],) + shape, dtype=dtype, device=device)\n for i in range(shape[dim]):\n if x.is_sparse:\n sparse_dim = x.sparse_dim()\n dense_dim = x.dense_dim()\n if dim < sparse_dim:\n ranges = []\n for j, sz in enumerate(shape[:sparse_dim]):\n if dim == j:\n ranges.append([i])\n else:\n ranges.append(list(range(sz)))\n indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()\n values = torch.ones((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)\n else:\n ranges = []\n for j, sz in enumerate(shape[:sparse_dim]):\n 
ranges.append(list(range(sz)))\n indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()\n values = torch.zeros((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)\n sv = [slice(None)] * (dense_dim + 1)\n sv[dim - sparse_dim + 1] = i\n values[tuple(sv)] = 1\n v = torch.sparse_coo_tensor(indices, values, shape, dtype=dtype, device=device)\n else:\n v = torch.zeros_like(x)\n sv = [slice(None)] * len(v.shape)\n sv[dim] = i\n v[tuple(sv)] = 1\n x_ = x.clone()\n x_.requires_grad_(True)\n\n if log:\n if x_.is_sparse:\n y = torch.sparse.log_softmax(x_, dim)\n else:\n y = F.log_softmax(x_, dim)\n else:\n if x_.is_sparse:\n y = torch.sparse.softmax(x_, dim)\n else:\n y = F.softmax(x_, dim)\n # replace nan-s with zeros\n y.data[y != y] = 0\n y.backward(v)\n g = x_.grad\n if not g.is_sparse:\n # replace nan-s with zeros\n g.data[g != g] = 0\n J[i] = g.to_dense() if g.is_sparse else g\n return J", "def fit(x_array, y_array, function, A_start):\n param = (x_array, y_array, function)\n\n A_final, cov_x, infodict, mesg, ier = leastsq(minimize, A_start, args=param, full_output=True)#, warning=True)\n \n return A_final", "def gradient_supplied(fun, x0, jac, info):\n result = OptimizerResult()\n result.x = x0\n result.fun = 0\n info[\"has_gradient\"] = jac is not None\n\n return result", "def optim_solve(\n self, x0: devices.PrimaryWeights = None, global_search: bool = False, **kwargs\n ) -> scipy.optimize.OptimizeResult:\n print(f'{\" optim_solve \":~^60s}')\n self._assert_problem_is_valid()\n if self._background is None:\n bounds = self.bounds * 2\n print(\"> No background specified, will optimise background.\")\n else:\n bounds = self.bounds\n\n if np.inf in self._target_contrast:\n print(\"> Aiming to maximise contrast.\")\n\n elif -np.inf in self._target_contrast:\n print(\"> Aiming to minimize contrast.\")\n\n constraints = [\n {\"type\": \"eq\", \"fun\": self.silencing_constraint, \"tol\": 1e-04}\n ]\n\n if x0 is None:\n x0 = self.initial_guess_x0()\n \n if not global_search: # Local minimization\n\n default_options = {\"iprint\": 2, \"disp\": True, \"ftol\": 1e-08}\n options = kwargs.pop(\"options\", default_options)\n\n print(\"> Performing local optimization with SLSQP.\")\n result = scipy.optimize.minimize(\n fun=self.objective_function,\n x0=x0,\n method=\"SLSQP\",\n bounds=bounds,\n constraints=constraints,\n options=options,\n **kwargs,\n )\n\n elif global_search: # Global minimization\n print(\n \"> Performing global optimization with basinhopping and SLSQP\"\n )\n\n # Configure global defaults\n disp = kwargs.pop(\"disp\", True)\n # Configure local defaults\n default_minimizer_kwargs = {\n \"method\": \"SLSQP\",\n \"constraints\": constraints,\n \"bounds\": bounds,\n \"options\": {\"iprint\": 2, \"disp\": False},\n }\n minimizer_kwargs = kwargs.pop(\n \"minimizer_kwargs\", default_minimizer_kwargs\n )\n\n # Do optimization\n result = scipy.optimize.basinhopping(\n func=self.objective_function,\n x0=x0,\n minimizer_kwargs=minimizer_kwargs,\n disp=disp,\n **kwargs,\n )\n\n return result", "def _optimize(self, objective):\n points = self._get_eval_points()\n\n if self.matrix_to_vector_transform is not None:\n # Transform the sampled matrix points in vectors\n points = np.array([self.matrix_to_vector_transform(points[i]) for i in range(self._nb_samples)])\n\n evaluations = objective(points)\n idx_best = np.argmin(evaluations, axis=0)\n\n return sc_opt.OptimizeResult(x=points[idx_best, :], success=True, fun=evaluations[idx_best, :],\n 
nfev=points.shape[0], message=\"OK\")", "def __init__(self, optimizer='BFGS', optimizer_kwargs=None,\n lossprime=True, max_iterations = 1000000):\n\n user_kwargs = optimizer_kwargs\n optimizer_kwargs = {}\n print(f\"in {optimizer}: max_iterations = {max_iterations}\")\n if optimizer == 'BFGS':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method' : 'BFGS',\n 'options': {'gtol': 1e-15,\n 'maxiter': max_iterations}\n }\n #optimizer_kwargs = {'method':'BFGS', 'gtol': 1e-15, }\n elif optimizer == 'L-BFGS-B':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'L-BFGS-B',\n 'options': {'ftol': 1e-05,\n 'gtol': 1e-08,\n 'maxfun': max_iterations,\n 'maxiter': max_iterations}\n }\n import scipy\n from distutils.version import StrictVersion\n if StrictVersion(scipy.__version__) >= StrictVersion('0.17.0'):\n optimizer_kwargs['options']['maxls'] = 2000\n elif optimizer == 'TNC':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'TNC',\n 'options': {'ftol': 0.,\n 'xtol': 0.,\n 'gtol': 1e-08,\n 'maxiter': max_iterations, }\n }\n elif optimizer == 'Newton-CG':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Newton-CG',\n 'options': {'xtol': 1e-15,\n 'maxiter': max_iterations,}\n }\n\n elif optimizer == 'Nelder-Mead':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Nelder-Mead',\n 'options': {'maxfun': max_iterations,\n 'maxiter': max_iterations, }\n }\n lossprime = False\n\n if user_kwargs:\n optimizer_kwargs.update(user_kwargs)\n self.optimizer = optimizer\n self.optimizer_kwargs = optimizer_kwargs\n self.lossprime = lossprime", "def Optimize(self):\n return _gmat_py.Optimizer_Optimize(self)", "def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian", "def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print \"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-lenght.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problem when initializing the system. 
\"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minima, the initial values are NOT consistant!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res", "def calc_jacobian(\n model: nn.Module,\n latents: torch.Tensor,\n normalize: bool = False,\n eps: float = 1e-8,\n vectorize=False,\n reverse_ad=True,\n norm_range=True,\n norm_diagonal=False,\n) -> torch.Tensor:\n # set to eval mode but remember original state\n in_training: bool = model.training\n model.eval() # otherwise we will get 0 gradients\n with torch.set_grad_enabled(True):\n jacob = []\n input_vars = latents.clone().requires_grad_(True)\n\n output_vars = model(input_vars)\n if not vectorize:\n for i in range(output_vars.shape[1]):\n jacob.append(\n torch.autograd.grad(\n output_vars[:, i : i + 1],\n input_vars,\n create_graph=True,\n grad_outputs=torch.ones(output_vars[:, i : i + 1].shape).to(\n output_vars.device\n ),\n )[0].detach()\n )\n\n jacobian = torch.stack(jacob, 1)\n else:\n from functorch import vmap, jacrev, jacfwd\n\n if reverse_ad is True:\n jac_fn = jacrev\n else:\n jac_fn = jacfwd\n\n sample_jacobian = jac_fn(model.forward, argnums=0)\n jacobian = vmap(\n lambda x: sample_jacobian(torch.unsqueeze(x, 0)), in_dims=0\n )(input_vars).squeeze()\n\n if normalize is True:\n # normalize the Jacobian by making it volume preserving\n # jacobian /= jacobian.det().abs().pow(1 / jacobian.shape[-1]).reshape(-1, 1, 1)\n\n # normalize to make variance to 1\n # norm_factor = (output_vars.std(dim=0) + 1e-8)\n # jacobian /= norm_factor.reshape(1, 1, -1)\n if norm_range is True:\n # normalize range to [0;1]\n dim_range = (\n (output_vars.max(dim=0)[0] - output_vars.min(dim=0)[0])\n .abs()\n .reshape(-1, 1)\n )\n\n jacobian /= dim_range + eps\n elif norm_diagonal is True:\n assert (dim := jacobian.shape[1]) == jacobian.shape[2]\n jacobian /= jacobian[:, (r := torch.arange(dim)), r].unsqueeze(-1) + eps\n\n # set back to original mode\n if in_training is True:\n model.train()\n\n return jacobian", "def objective_func(self, topology, grad_func, tmax, eta):\n f = objective_function_numpy\n x_func = partial(self._optimize_form, topology=topology, tmax=tmax, eta=eta)\n return partial(f, x_func=x_func, grad_func=grad_func)", "def newton_quad(f, x0, dx, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Fill the Jacobian matrix\n for j in range(n):\n x[j] += dx[j]\n df = f(x) - fx\n\n 
for i in range(n):\n J[i, j] = df[i] / dx[j]\n\n x[j] -= dx[j]\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin quadratic linesearch \n lamb = 1.0\n y = x + Dx * lamb\n fy = f(y)\n\n fxnorm = np.linalg.norm(fx)\n fynorm = np.linalg.norm(fy)\n\n # Define the known values of the minimization function (Eq. 9)\n g0 = 0.5 * fxnorm ** 2\n dg0 = - fxnorm ** 2\n\n while (fynorm > (1 - lamb / 2) * fxnorm) and (lamb > (1 / 128.0)):\n glamb = 0.5 * fynorm ** 2\n c = (glamb - g0 - dg0 * lamb) / (lamb ** 2)\n\n # Update step\n lamb = - dg0 / (2 * c)\n y = x + Dx * lamb\n fy = f(y)\n fynorm = np.linalg.norm(fy)\n\n # Save latest approximation\n x = y\n fx = fy\n\n Dxnorm = np.linalg.norm(Dx)\n dxnorm = np.linalg.norm(dx)\n if Dxnorm < dxnorm or fxnorm < eps:\n break\n\n return x", "def jacobian_i(self, x):\n return np.matrix([-x**3, -x**2, -x, -1])", "def calc_JF(X, P, H, TargetVar, V = None, EPIC_bool = None, regularize = None):\n if V is None:\n beta = X\n else:\n beta = V.dot(X)\n\n invCh = NP.diag(NP.exp(beta))\n A = P + H.T.dot(invCh.dot(H))\n invA = inv(A)\n # assemble JF\n # fill the derivatives with respect to each beta\n B = H.dot(invA)\n BB = B * B\n E = NP.diag( NP.exp(beta) )\n JF = NP.transpose( -1.0 * E.dot(BB) )\n\n if V is not None:\n JF = JF.dot(V)\n\n if EPIC_bool is not None:\n JF = JF[EPIC_bool, :]\n\n JF = NP.diag(1/TargetVar).dot(JF)\n\n # extend JF if using regularization to compute EPIC\n # Note that here the EPIC will be approximately met.\n if regularize is not None:\n if 'sigma_weight' not in regularize.keys():\n sigma_weight = sigma_weight_default\n else:\n sigma_weight = regularize['sigma_weight']\n\n # add the jacobian of the Wh damping\n JF2 = 0.5 * NP.diag(NP.exp(beta/2)) / sigma_weight\n \n JF = NP.vstack((JF, JF2))\n\n return JF", "def _calc_J(self, name, x, lambdify=True):\n\n J = None\n J_func = None\n filename = name + '[0,0,0]' if np.allclose(x, 0) else name\n filename += '_J'\n\n # check to see if should try to load functions from file\n J, J_func = self._load_from_file(filename, lambdify)\n\n if J is None and J_func is None:\n # if no saved file was loaded, generate function\n print('Generating Jacobian function for %s' % filename)\n\n Tx = self._calc_Tx(name, x=x, lambdify=False)\n # NOTE: calculating the Jacobian this way doesn't incur any\n # real computational cost (maybe 30ms) and it simplifies adding\n # the orientation information below (as opposed to using\n # sympy's Tx.jacobian method)\n # TODO: rework to use the Jacobian function and automate\n # derivation of the orientation Jacobian component\n J = []\n # calculate derivative of (x,y,z) wrt to each joint\n for ii in range(self.N_JOINTS):\n J.append([])\n J[ii].append(Tx[0].diff(self.q[ii])) # dx/dq[ii]\n J[ii].append(Tx[1].diff(self.q[ii])) # dy/dq[ii]\n J[ii].append(Tx[2].diff(self.q[ii])) # dz/dq[ii]\n\n if 'EE' in name:\n end_point = self.N_JOINTS\n elif 'link' in name:\n end_point = int(name.strip('link'))\n elif 'joint' in name:\n end_point = int(name.strip('joint'))\n # can't have more joint derivatives than there are joints\n end_point = min(end_point, self.N_JOINTS)\n\n # add on the orientation information up to the last joint\n for ii in range(end_point):\n J[ii] = J[ii] + list(self.J_orientation[ii])\n # fill in the rest of the joints orientation info with 0\n for ii in range(end_point, self.N_JOINTS):\n J[ii] = J[ii] + [0, 0, 0]\n J = sp.Matrix(J).T # correct the orientation of J\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n 
'%s/%s' % (self.config_folder, filename))\n cloudpickle.dump(J, open(\n '%s/%s/%s' % (self.config_folder, filename, filename), 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return J\n\n if J_func is None:\n J_func = self._generate_and_save_function(\n filename=filename, expression=J,\n parameters=self.q+self.x)\n return J_func", "def _optfn(self, x):\n\n logger.debug(\" optfn(theta=%s)\", str(x))\n\n wmx = max(self.weights) * self.weighttrunc\n\n ip = []\n for i,w in enumerate(self.weights):\n if w < wmx:\n continue\n ip.append((i,w,x))\n\n if self.pool is None:\n itr = map(self.worker.loglik_grad, ip)\n else:\n itr = self.pool.imap_unordered(_pool_loglik_grad, ip, 10)\n\n if self._prior_shape is None:\n ll = 0.\n grad = np.zeros(len(x))\n else:\n ll = sum(sp.special.xlogy(self._prior_shape-1,x)-(x/self._prior_scale))\n grad = (self._prior_shape - 1)/x - 1/self._prior_scale\n\n for l,g in itr:\n ll += l\n grad += g\n\n logger.debug(\" optfn=%g\", ll)\n\n return -ll, -grad", "def jacobval(state, time, press):\n a = len(state)\n jacobian = np.zeros(a**2)\n pyjacob.py_eval_jacobian(time, press, state, jacobian)\n jacobian = np.reshape(jacobian, (a,a))\n return jacobian", "def SF_ML(jd,mag,errmag,x0=[0.5, 0.5],bnds=((0.0, 3.0), (0.0,3.0))):\n\n dtarray, dmagarray, sigmaarray = SFarray(jd,mag,errmag)\n ndt=np.where((dtarray<=365))\n dtarray=dtarray[ndt]\n dmagarray=dmagarray[ndt]\n sigmaarray=sigmaarray[ndt]\n\n\n x0 = [0.5, 0.5]\n bnds = ((0.0, 3.0), (0.0,3.0))\n\n #res = sp.optimize.minimize(neg_lnlike, x0, args=(dtarray, dmagarray, sigmaarray),\n # method='L-BFGS-B', bounds=bnds, options={'ftol': 1e-15, 'gtol': 1e-10, 'eps': 1e-08, 'maxfun': 150000, 'maxiter': 150000, 'maxls': 40})\n\n res = sp.optimize.minimize(neg_lnlike, x0, args=(dtarray, dmagarray, sigmaarray),\n method='Nelder-Mead', bounds=bnds, options={'fatol': 1e-10, 'xatol': 1e-10, 'maxiter': 15000})\n\n g_min = res.x[0]\n a_min = res.x[1]\n\n return(g_min, a_min)", "def test_jv():\n import time\n t1 = time.time()\n\n v_list = [ 3.3, 4, 1.9, 0, 9.2, -7.1 ]\n x_list = [ 0, 1.01, 0.2, 3.3, 5.9, 77. ]\n vals1 = [ galsim.bessel.jv(v,x) for v,x in zip(v_list,x_list) ]\n print 'x = ',x_list\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = [ scipy.special.jv(v,x) for v,x in zip(v_list,x_list) ]\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jv disagrees with scipy.special.jv\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of jv.'\n\n # These values are what scipy returns. 
Check against these, so not require scipy.\n vals2 = [ 0.0,\n 0.0025745895535573995,\n 0.0068656051839294848,\n -0.34429626039888467,\n 0.015134049434950021,\n 0.087784805831697565\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jv disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "def jacobian(self, xs):\n rx_list = []\n for nx,x in enumerate(xs):\n \n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nx], numpy.shape(x), err_msg = '\\ntaped xs[%d].shape != forward xs[%d]\\n'%(nx,nx))\n rx = numpy.ravel(x)\n rx_list.append(rx)\n self.x = numpy.concatenate(rx_list)\n return wrapped_functions.jacobian(self.tape_tag, self.x)", "def optimize(\n self, n, tol=None, jac=True, hessp=False, optlib=\"scipy\", **options\n ):\n return {\"scipy\": self.optimize_scipy, \"nlopt\": self.optimize_nlopt,}[\n optlib\n ](n=n, tol=tol, jac=jac, hessp=hessp, **options)", "def minimize(self, fun, x_0, bounds=None):\n x = np.copy(x_0).reshape(-1)\n opt = climin.Adadelta(wrt=x, fprime=fun, step_rate=self.step_rate, momentum=self.momentum,\n decay=self.decay, offset=self.offset)\n\n x_list = [x.copy()]\n time_list = [0.]\n start = time.time()\n\n for info in opt:\n i = info['n_iter']\n if i > self.maxiter:\n break\n \n if self.disp and not (i % self.print_freq):\n grad = info['gradient']\n print('Epoch', int(i / self.iter_per_epoch), ':')\n print('\\tx', x.reshape(-1)[:5])\n print(\"\\tGradient norm\", np.linalg.norm(grad))\n \n if not i % int(self.iter_per_epoch):\n x_list.append(x.copy())\n time_list.append(time.time() - start)\n\n stat_dict = {'time_lst': time_list, 'x_lst': x_list, 'fun': None, 'time': time_list[-1], \n 'info': info}\n\n return x.copy(), stat_dict", "def return_lxx_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[k1*1*dt,0],[0,0]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt: np.matrix([[0,0],[0,k2*1*dt]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def test_jn():\n import time\n t1 = time.time()\n\n n_list = [ 3, 4, 1, 0, 9, 7 ]\n x_list = [ 0, 1.01, 0.2, 3.3, 5.9, 77. 
]\n vals1 = [ galsim.bessel.jn(n,x) for n,x in zip(n_list,x_list) ]\n print 'x = ',x_list\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = [ scipy.special.jn(n,x) for n,x in zip(n_list,x_list) ]\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jn disagrees with scipy.special.jn\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of jn.'\n\n # These values are what scipy returns. Check against these, so not require scipy.\n vals2 = [ 0.0,\n 0.0025745895535573995,\n 0.099500832639236036,\n -0.34429626039888467,\n 0.018796532416195257,\n -0.082526868218916541\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jn disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "def jacobian(self, x1, x2, out=None):\n raise NotImplementedError", "def fmin(evaluator, xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, callback=None):\n fcalls, func = wrap_function(evaluator.target)\n x0 = evaluator.x\n #x0 = asfarray(x0).flatten()\n N = len(x0)\n if maxiter is None:\n maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n rho = 1; chi = 2; psi = 0.5; sigma = 0.5;\n one2np1 = range(1,N+1)\n\n sim = []\n fsim = [.0]*(N+1)\n for i in range(0,N+1):\n sim.append([.0]*(N+1))\n\n sim[0] = x0\n \n fsim[0] = func(x0)\n nonzdelt = 0.05\n zdelt = 0.00025\n for k in range(0,N):\n y = list(x0)\n if y[k] != 0:\n y[k] = (1+nonzdelt)*y[k]\n else:\n y[k] = zdelt\n\n sim[k+1] = y\n f = func(y)\n fsim[k+1] = f\n\n ind = sort_permutation(fsim)\n fsim = apply_permutation(fsim,ind)\n # sort so sim[0,:] has the lowest function value\n sim = apply_permutation(sim,ind)\n evaluator.x = sim[0]\n\n iterations = 1\n\n \n while (fcalls[0] < maxfun and iterations < maxiter):\n sim_size = max(map(lambda x : max(map(abs,map(operator.sub, x, sim[0]))),sim[1:]))\n #print \"The simplex size is %.6g(tol=%.6g)\"%(sim_size,xtol)\n fsim_size = max( map(lambda x: abs(x-fsim[0]), fsim[1:]))\n #print \"The simplex image size is %.6g(tol=%.6g)\"%(fsim_size, ftol)\n if ( sim_size <= xtol ) \\\n and fsim_size <=ftol:\n break\n# if (max(numpy.ravel(abs(sim[1:]-sim[0]))) <= xtol \\\n# and max(abs(fsim[0]-fsim[1:])) <= ftol):\n# break\n\n xbar = averageArrays(sim[:-1])\n xr = linearCombine((1+rho),xbar, - rho,sim[-1])\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = linearCombine((1+rho*chi),xbar, - rho*chi,sim[-1])\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = linearCombine((1+psi*rho),xbar, - psi*rho,sim[-1])\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink=1\n else:\n # Perform an inside contraction\n xcc = linearCombine((1-psi),xbar, psi,sim[-1])\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = linearCombine((1-sigma),sim[0] , sigma,sim[j])\n fsim[j] = func(sim[j])\n\n ind = sort_permutation(fsim)\n sim = apply_permutation(sim,ind)\n fsim = apply_permutation(fsim,ind)\n evaluator.x = sim[0]\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n printOut(\"Warning: Maximum number of function evaluations 
has \"\\\n \"been exceeded.\")\n elif iterations >= maxiter:\n warnflag = 2\n if disp:\n printOut(\"Warning: Maximum number of iterations has been exceeded\")\n else:\n if disp:\n printOut(\"Optimization terminated successfully.\")\n printOut(\" Current function value: %f\" % fval)\n printOut(\" Iterations: %d\" % iterations)\n printOut(\" Function evaluations: %d\" % fcalls[0])\n\n\n if full_output:\n retlist = x, fval, iterations, fcalls[0], warnflag\n else:\n retlist = x\n\n return retlist", "def test_coefficients_jax_interface(self):\n import jax\n\n # Need to enable float64 support\n from jax.config import config\n\n remember = config.read(\"jax_enable_x64\")\n config.update(\"jax_enable_x64\", True)\n\n qnode = qml.QNode(self.circuit, self.dev, diff_method=\"parameter-shift\")\n\n weights = jax.numpy.array([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)\n\n config.update(\"jax_enable_x64\", remember)", "def cost_fun(x, problem):\n j = 0\n if problem['use_log_bar']:\n c = ineqconstr(x, problem)\n j += np.sum(logbarrierfunc(0.1, c, problem['use_sigma']))\n\n x, t_final = matrify(x, problem)\n if problem['T']!=0:\n j += np.sum([problem['cost_fun_single'](x[:, :, i], t_final, problem) for i in range(problem['Nv'])])\n else:\n j = t_final\n return j", "def myleastsq(errfunc0,x0,args=None,bounds=None,**exkw):\n from scipy import optimize\n if hasattr(optimize,'minimize'):\n def errfunc(x,*iargs):\n return sum(errfunc0(x,*iargs)**2)\n if args is not None: exkw['args'] = args\n res = optimize.minimize(errfunc,x0[:],bounds=bounds,**exkw)\n return res.x,res.success\n else:\n lres = sys.float_info.max\n def errfunc(x,*iargs):\n if bounds!=None:\n for idx in range(len(x)):\n if bounds[idx][0]!=None and x[idx]<bounds[idx][0]: return lres\n if bounds[idx][1]!=None and x[idx]>bounds[idx][1]: return lres\n return errfunc0(x,*iargs)\n if args is not None: exkw['args'] = args\n return optimize.leastsq(errfunc,x0,**exkw)", "def newton_jacobian(f, x0, Jf, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Calculate Jacobian\n J = Jf(x)\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin backtracking linesearch\n lamb = 2.0\n while True: \n lamb /= 2\n y = x + Dx * lamb\n fy = f(y)\n\n fynorm = np.linalg.norm(fy)\n fxnorm = np.linalg.norm(fx)\n\n if (fynorm < (1 - lamb / 2) * fxnorm) or (lamb < (1 / 128.0)):\n break\n\n # Save latest approximation\n x = y\n fx = fy\n\n fxnorm = np.linalg.norm(fx)\n if fxnorm < eps:\n break\n\n return x", "def minimize(A):\n return determinize(reverse(determinize(reverse(A))))", "def objective_function(num, x, fe_count, best):\n if num == 1:\n return sphere(x, fe_count, best)\n elif num == 2:\n return rastrigin(x, fe_count, best)\n elif num == 3:\n return rosenbrock(x, fe_count, best)\n elif num == 4:\n return schwefel(x, fe_count, best)\n elif num == 5:\n return quartic(x, fe_count, best)\n elif num == 6:\n return ackley(x, fe_count, best)\n elif num == 7:\n return schaffer(x, fe_count, best)\n elif num == 8:\n return griewank(x, fe_count, best)\n elif num == 9:\n return matyas(x, fe_count, best)\n elif num == 10:\n return trid(x, fe_count, best)\n else:\n pass", "def jit(func):\n return func", "def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable:\n\n arnoldi_fact = 
_generate_arnoldi_factorization(jax)\n\n # ######################################################\n # ####### NEW SORTING FUCTIONS INSERTED HERE #########\n # ######################################################\n @functools.partial(jax.jit, static_argnums=(1,))\n def LR_sort(evals, p):\n inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n @functools.partial(jax.jit, static_argnums=(1,))\n def LM_sort(evals, p):\n inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n # #######################################################\n # #######################################################\n # #######################################################\n @functools.partial(jax.jit, static_argnums=(4, 5, 6))\n def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh):\n funs = [LR_sort, LM_sort]\n shifts, _ = funs[which](evals, p)\n # compress to k = numeig\n q = jax.numpy.zeros(Hm.shape[0])\n q = jax.ops.index_update(q, jax.ops.index[-1], 1)\n m = Hm.shape[0]\n\n for shift in shifts:\n Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m))\n Hm = Qj.T.conj() @ Hm @ Qj\n Vm = Qj.T @ Vm\n q = q @ Qj\n\n fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1]\n Vk = Vm[0:k, :]\n Hk = Hm[0:k, 0:k]\n H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype)\n H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk)\n Z = jax.numpy.linalg.norm(fk)\n v = fk / Z\n krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[0:k, :],\n Vk)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[k:], v)\n Z = jax.numpy.linalg.norm(fk)\n #if fk is a zero-vector then arnoldi has exactly converged.\n #use small threshold to check this\n return krylov_vectors, H, fk, Z < res_thresh\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def update_data(Vm_tmp, Hm_tmp, numits):\n Vm = Vm_tmp[0:numits, :]\n Hm = Hm_tmp[0:numits, 0:numits]\n fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1]\n return Vm, Hm, fm\n\n @functools.partial(jax.jit, static_argnums=(3,))\n def get_vectors(Vm, unitary, inds, numeig):\n\n def body_vector(i, vals):\n krv, unitary, states, inds = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m, :] * unitary[m, inds[n]])\n return [krv, unitary, states, inds]\n\n state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype)\n _, _, state_vectors, _ = jax.lax.fori_loop(\n 0, numeig * Vm.shape[0], body_vector,\n [Vm, unitary, state_vectors, inds])\n state_norms = jax.numpy.linalg.norm(state_vectors, axis=1)\n state_vectors = state_vectors / state_norms[:, None]\n return state_vectors\n\n\n def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n \"\"\"\n Implicitly restarted arnoldi factorization of `matvec`. The routine\n finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec`\n by alternating between compression and re-expansion of an initial\n `num_krylov_vecs`-step Arnoldi factorization.\n\n Note: The caller has to ensure that the dtype of the return value\n of `matvec` matches the dtype of the initial state. Otherwise jax\n will raise a TypeError.\n\n Args:\n matvec: A callable representing the linear operator.\n args: Arguments to `matvec`. 
`matvec` is called with\n `matvec(x, *args)` with `x` the input array on which\n `matvec` should act.\n initial_state: An starting vector for the iteration.\n num_krylov_vecs: Number of krylov vectors of the arnoldi factorization.\n numeig: The number of desired eigenvector-eigenvalue pairs.\n which: Which eigenvalues to target. Currently supported: `which = 'LR'`\n or `which = 'LM'`.\n eps: Convergence flag. If the norm of a krylov vector drops below `eps`\n the iteration is terminated.\n maxiter: Maximum number of (outer) iteration steps.\n Returns:\n eta, U: Two lists containing eigenvalues and eigenvectors.\n \"\"\"\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]\n\n return implicitly_restarted_arnoldi_method", "def objective(self, args: Dict[str, Any]) -> float:\n pass" ]
[ "0.63569784", "0.6333814", "0.6261257", "0.6101453", "0.60971725", "0.60695785", "0.5995225", "0.5894081", "0.589276", "0.5768566", "0.5727531", "0.57158124", "0.57106614", "0.57106614", "0.56989694", "0.56768656", "0.56757146", "0.5599489", "0.5598426", "0.55934316", "0.55714023", "0.5548363", "0.55149424", "0.5494964", "0.54857206", "0.548379", "0.5477905", "0.54680383", "0.5462606", "0.5418106", "0.54079217", "0.5391712", "0.5376064", "0.5374296", "0.5358151", "0.5340596", "0.5331475", "0.5324886", "0.53212416", "0.53171617", "0.53163713", "0.5314982", "0.5309024", "0.52995247", "0.5289831", "0.52780795", "0.526108", "0.52598774", "0.5255786", "0.52461135", "0.5244622", "0.5243824", "0.5240301", "0.5239567", "0.5238059", "0.52371013", "0.5233023", "0.5223054", "0.52222186", "0.52215165", "0.5212155", "0.51894265", "0.51846427", "0.5183566", "0.5181082", "0.5180313", "0.5161635", "0.51544666", "0.51522577", "0.51485467", "0.5148276", "0.5142488", "0.5139151", "0.51315856", "0.51299804", "0.51259995", "0.51249945", "0.5124526", "0.5111526", "0.5109648", "0.51081973", "0.5104323", "0.51026994", "0.5100218", "0.50973034", "0.5094177", "0.5084141", "0.50775695", "0.50727266", "0.50688595", "0.5067071", "0.50665665", "0.5053067", "0.50452435", "0.50410306", "0.50369114", "0.50273454", "0.50252044", "0.50200856", "0.5018012" ]
0.57981586
9
Ask bot to decide for you, Eg .decide hue, hue2, ...
async def decide(self, ctx, *, args: str): await ctx.send(f":point_right: **{random.choice(args.split(','))}**")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decide():", "def handDecision(handIn):", "def whats_my_color_handler(handler_input):\n # type: (HandlerInput) -> Response\n if color_slot_key in handler_input.attributes_manager.session_attributes:\n fav_color = handler_input.attributes_manager.session_attributes[\n color_slot_key]\n speech = \"Your favorite color is {}. Goodbye!!\".format(fav_color)\n handler_input.response_builder.set_should_end_session(True)\n else:\n speech = \"I don't think I know your favorite color. \" + help_text\n handler_input.response_builder.ask(help_text)\n\n handler_input.response_builder.speak(speech)\n return handler_input.response_builder.response", "def my_color_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if color_slot in slots:\n fav_color = slots[color_slot].value\n handler_input.attributes_manager.session_attributes[\n color_slot_key] = fav_color\n speech = (\"Now I know that your favorite color is {}. \"\n \"You can ask me your favorite color by saying, \"\n \"what's my favorite color ?\".format(fav_color))\n reprompt = (\"You can ask me your favorite color by saying, \"\n \"what's my favorite color ?\")\n else:\n speech = \"I'm not sure what your favorite color is, please try again\"\n reprompt = (\"I'm not sure what your favorite color is. \"\n \"You can tell me your favorite color by saying, \"\n \"my favorite color is red\")\n\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "async def choose(self, ctx, *args):\n choicelist = []\n for choice in args:\n choicelist.append(choice)\n result = random.choice(choicelist)\n await ctx.send(\"Like it or not, I choose {}!\".format(result))", "async def choose(self, ctx):\r\n if len(str(ctx.message.content)) < 9:\r\n await self.bot.say('{}, the usage is **!choose Option 1; Option 2; Option 3**, until you run out of options.'.format(ctx.message.author.mention))\r\n else:\r\n choices = str(ctx.message.content[8:])\r\n if '; ' not in choices:\r\n await self.bot.say('{}, the usage is **!choose Option 1; Option 2; Option 3**, ntil you run out of options.'.format(ctx.message.author.mention))\r\n else:\r\n options = choices.split('; ')\r\n await self.bot.say('{}, I choose: **{}**.'.format(ctx.message.author.mention,random.choice(options)))", "async def eightball(self, ctx, *args):\n if args:\n choices = [\"Yes, definitely.\", \"Yes.\", \"Most likely yes.\", \"I think so, yes.\",\n \"Absolutely, no question about it\", \"Maybe.\", \"Perhaps.\", \"Possibly.\",\n \"I don't think so.\", \"No.\",\n \"Most likely not.\", \"Definitely not.\", \"No way.\"]\n answer = rd.choice(choices)\n await ctx.send(f\"**{answer}**\")\n self.logger.info(misolog.format_log(ctx, f\"{answer}\"))\n else:\n await ctx.send(\"You must ask something to receive an answer!\")\n self.logger.warning(misolog.format_log(ctx, f\"question=None\"))", "def hey(self, msg):\n if issilence(msg):\n return \"Fine. 
Be that way.\"\n elif isshouting(msg):\n return \"Woah, chill out!\"\n elif isquestion(msg):\n return \"Sure.\"\n else:\n return \"Whatever.\"", "async def choose(self, ctx, *choices : str):\n await ctx.send(random.choice(choices))", "async def _defacto(self, ctx: commands.Context):\n responses = ['DI FACTO', 'di facto']\n await ctx.send(random.choice(responses))", "def get_user_input(self, game, hand, message, allowed_actions):\n if random.random() < 0.5:\n return 'hit'\n else:\n return 'stand'", "async def _conoscitore(self, ctx: commands.Context):\n responses = ['de facto, di facto', 'fanculizzati','Tra il lusco e il brusco,tra il serio e il profano,tra il serio e il faceto']\n await ctx.send(random.choice(responses))", "async def choose(*choices : str):\n await bot.say(random.choice(choices))", "def nlu_cli(default_mood, user_id):\n mood = -1 # TODO currently superfulous while loop given default mood.\n while mood not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n mood = input(\n \"Enter your current mood on a scale of 1 to 10 where \"\n + \"1 is negative, 5 is neutral, and 10 is positive (default is \"\n + str(default_mood) + \"): \"\n )\n if mood == \"\" or not mood.isdigit():\n mood = default_mood\n else:\n mood = int(mood)\n mood = default_mood if mood == \"\" else int(mood)\n\n topic = \"\"\n while topic == \"\":\n topic = input(\"Enter Topic: \").strip().lower()\n\n #loop until they select correct dialogue act, show help after first fail\n dialogue_act = \"\"\n first = True\n da_names = [da.name for da in DA if da.name not in\n ['statement', 'question', 'response_action']\n ]\n while dialogue_act not in da_names:\n dialogue_act = input(\"Enter dialogue Act: \").strip().lower()\n\n # TODO add help print out descriptions\n if first and dialogue_act not in da_names:\n first = False\n # Help, details what each dialogue act means.\n print(\"Enter a dialogue act from list below:\\n\", da_names)\n\n question_type = None\n if is_question(DA[dialogue_act]):\n question_type = \"\"\n first = True\n question_types = [qt.name for qt in QT]\n while question_type not in question_types:\n question_type = input(\"Enter question type: \").strip().lower()\n\n # TODO add help print out descriptions\n if first and question_type not in question_types:\n first = False\n # Help, details what each dialogue act means.\n print(\"Enter a question type from list below:\\n\",\n question_types)\n\n text = input(\n \"Enter utterance text: \"\n ).strip()\n\n sentiment = -1\n while sentiment not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n sentiment = input(\n \"Enter utterance sentiment 1 to 10. \"\n + \"1 negative, 5 neutral, and 10 positive: \"\n )\n sentiment = -1 if sentiment == \"\" else int(sentiment)\n\n assertiveness = -1\n while assertiveness not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n assertiveness = input(\n \"Enter utterance assertiveness 1 to 10. 
\"\n + \"1 passive/listening oriented, 5 neutral, and \"\n + \"10 assertive/leading conversation: \"\n )\n assertiveness = -1 if assertiveness == \"\" else int(assertiveness)\n\n return Utterance(\n user_id,\n DA[dialogue_act],\n topic,\n sentiment,\n assertiveness,\n text,\n question_type\n ), mood", "async def watdo(self, ctx, *args):\n choicelist = []\n for choice in args:\n choicelist.append(choice)\n result = random.choice(choicelist)\n await ctx.bot.send_message(ctx.message.channel, \"I pick {}!\".format(result))", "def func(self):\n try:\n if not self.switches or \"all\" in self.switches:\n self.list_favor()\n elif \"set\" in self.switches or \"add\" in self.switches:\n self.add_favor()\n elif \"remove\" in self.switches:\n self.remove_favor()\n else:\n raise CommandError(\"Invalid switch.\")\n except CommandError as err:\n self.msg(err)\n else:\n self.mark_command_used()", "def askOpponentType():\n\n opponentType = Dialog.Dialog(None, {'title': 'Battleship',\n 'text': 'Choose your opponent',\n 'bitmap': 'question',\n 'default': 0,\n 'strings': ('Human', 'Computer',)}).num\n return opponentType", "async def showcolor(self, ctx: discord.ext.commands.Context, *args):\n message_channel: discord.abc.Messageable = ctx.message.channel\n if len(args) == 1:\n argstring = str(args[0]).strip()\n # request the color informations to the api\n if argstring.startswith(\"(\") and argstring.endswith(\")\"):\n url = \"http://www.thecolorapi.com/id?rgb=rgb(\"\n rgblist = argstring[1:-1].split(',')\n for color in rgblist:\n url += color.strip() + \",\"\n url = url[:-1] + \")\"\n elif argstring.startswith(\"#\"):\n url = \"http://www.thecolorapi.com/id?hex=\" + argstring[1:]\n else:\n await message_channel.send(\n \"Color format non valid, for more see \" + self.command_prefix + \"help showcolor\")\n return\n reply_error = False\n request_result = None\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp: # the website use get\n if not str(resp.status) == \"200\":\n reply_error = True\n else:\n request_result = await resp.json()\n if reply_error:\n await message_channel.send(\"*An error occurred requesting the color... is your color code valid?*\")\n else:\n embed = discord.Embed(title=\"Color Display\", url=request_result[\"image\"][\"bare\"],\n color=(request_result[\"rgb\"][\"r\"] << 16) + (request_result[\"rgb\"][\"g\"] << 8) +\n request_result[\"rgb\"][\"b\"])\n embed.set_author(name=\"Color asked by by \" + ctx.message.author.name,\n icon_url=ctx.message.author.avatar_url)\n embed.add_field(name=\"Color Hex Value:\", value=request_result[\"hex\"][\"value\"], inline=False)\n embed.add_field(name=\"Color RGB Value:\", value=request_result[\"rgb\"][\"value\"], inline=False)\n embed.set_footer(text=self.botVariables.get_description(),\n icon_url=self.botVariables.get_bot_icon())\n await message_channel.send(embed=embed)\n else:\n await message_channel.send(\n \"**Usage:** \" + self.command_prefix + \"showcolor #COLORHEX/\\\"(R,G,B)\\\", for more see \"\n + self.command_prefix + \"help showcolor\")", "def test_chatbot_returns_answer_to_known_input(self):\n input_text = \"What... is your favourite colour?\"\n response = self.chatbot.get_response(input_text)\n\n self.assertIn(\"Blue\", response)", "def user_question():\n return input('What would you like? (espresso/latte/cappuccino): ')", "async def choose(self, ctx, *args):\n query = \" \".join(args)\n choices = query.split(\" or \")\n if len(choices) < 2:\n await ctx.send(\"Give me at least 2 options to choose from! 
(separate options with `or`)\")\n self.logger.warning(misolog.format_log(ctx, f\"1 option\"))\n return\n choice = rd.choice(choices).strip()\n await ctx.send(f\"I choose **{choice}**\")\n self.logger.info(misolog.format_log(ctx, f\"{choice}\"))", "async def choose(self, ctx, *, choices: str):\n await ctx.send(\n self.bot.bot_prefix + 'I choose: ``{}``'.format(random.choice(choices.split(\"|\"))))", "def dialogue(self, user_input, state, user_preferences):\n \n if user_input in [\"configure formal\", \"configure delay\", \"configure informal\", \"configure no delay\"]:\n self.configure(user_input)\n user_input=\"\"\n self.dialogue(user_input,state,user_preferences)\n \n time.sleep(self.delay)\n self.statelog.append([user_input,state]) #tuple of user utterance and its associated state. We use this to keep track of state jumps.\n \n if state == \"exit\":\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"Goodbye\")))\n return\n \n if state in (\"init\"):\n user_preferences = [0,0,0]\n user_input = input(\"Dialog Agent: \"+random.choice(self.responses.get(\"Welcome\"))+\"User: \")\n state = self.classification(user_input)\n self.dialogue(user_input, state, user_preferences)\n return\n \n if state in (\"inform\", \"reqalts\", 'hello'):\n extracted_preferences = self.preference_extractor(user_input)\n for i,d in enumerate(user_preferences):\n if d == 0:\n user_preferences[i] = extracted_preferences[i]\n \n state=\"fill_blanks\" #if more slots to be filled\n self.suggestions=self.lookup(user_preferences)\n\n if (len(self.suggestions)==0) or (len(self.suggestions)==1):\n \n state=\"answer\" #if there is none or 1 restaurant to suggest\n self.dialogue(user_input, state, user_preferences)\n return \n \n \n if state == \"fill_blanks\": #ask user for area/foodtype/pricerange\n grounding=self.grounding(user_preferences)\n if user_preferences[0] == 0:\n user_input = input(\"Dialog Agent: \"+grounding+random.choice(self.responses.get(\"Area\"))+\"User: \")\n \n state = self.classification(user_input)\n if \"area\" not in user_input:\n user_input+=\" area\"\n if \"dont care\" in user_input:\n user_input='any area'\n elif user_preferences[1] == 0:\n user_input = input(\"Dialog Agent: \"+grounding+random.choice(self.responses.get(\"Price\"))+\"User: \")\n \n state = self.classification(user_input)\n if \"price\" not in user_input:\n user_input+=\" price\"\n if \"dont care\" in user_input:\n user_input='any price'\n elif user_preferences[2] == 0:\n user_input = input(\"Dialog Agent: \"+grounding+random.choice(self.responses.get(\"Food\"))+\"User: \")\n \n state = self.classification(user_input)\n if \"food\" not in user_input:\n user_input+=\" food\"\n if \"dont care\" in user_input:\n user_input='any food'\n else:\n state='ask_extra_preferences'\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state== 'ask_extra_preferences':\n state=self.ask_extra_preferences(user_preferences)\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state==\"confirmpreferences\":\n user_input = input(\"Dialog Agent: \"+random.choice(self.responses.get(\"AffirmPreferences\")).format(user_preferences[0],user_preferences[1],user_preferences[2])+\"User: \")\n accept = self.agree(user_input)\n if accept is True:\n self.suggestions = self.lookup(user_preferences)\n state = \"answer\"\n elif accept is False:\n state = \"inform\"\n user_input = \"\"\n user_preferences = [0,0,0]\n elif accept==\"reqalts\":\n user_preferences=[0,0,0]\n else: \n state = \"accept\"\n 
self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state == \"answer\": \n if self.suggestions: #found at least 1 restaurant\n user_input=input(\"Dialog Agent: \"+self.suggest_restaurant()+\"User: \")\n state = self.classification(user_input)\n if state in [\"ack\", \"affirm\"]:\n state = \"goodbye\"\n elif state in [\"reqalts\", \"reqmore\", \"deny\", \"negate\"]:\n state = \"answer\"\n else: #no restaurants found. Search for alternatives\n alternatives=self.get_alternative_restaurants(self.alternative_preferences(user_preferences))#offer alternatives\n if len(alternatives)==1: #found 1 alternative\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"NoOptions\"))+\"Let me look for an alternative for you...\\n\")\n self.suggestions=alternatives\n self.recommendation=self.suggestions[0]\n user_input=input(\"Dialog Agent: \"+self.suggest_restaurant()+\"User: \")\n if self.agree(user_input):\n self.get_restaurant_contacts(self.recommendation)\n state=\"goodbye\"\n elif alternatives: #found multiple alternatives\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"NoOptions\"))+\"Here is a list of alternatives:\")\n for a in alternatives:\n print(\"Dialog Agent: \"+self.get_restaurant_info(a))\n user_input = input(\"Dialog Agent: \"+'Would you like to choose one (1) or change your preferences(2)?\\n'+\"User: \")\n if user_input==\"1\":\n user_input=input(\"Dialog Agent: \"+\"Which one would you like to choose?\\n\"+\"User: \")\n for alternative in alternatives:\n if dt(user_input.lower(), alternative.lower())<3:# take into account misspellings\n self.recommendation=alternative\n state=\"thankyou\"\n elif user_input==\"2\":\n user_preferences=[0,0,0]\n state='inform'\n elif user_input==\"exit\":\n state='exit'\n else:\n print(\"Dialog Agent: \"+\"Please choose one of the two options\")\n else:#didnt find any alternative\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"NoOptions\")))\n user_preferences=[0,0,0]\n state='inform'\n user_input=\"\"\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state in [\"reqalts\",\"thankyou\", \"goodbye\", \"reset\"]:\n \n user_input=input(\"Dialog Agent: \"+self.get_restaurant_contacts(self.recommendation)+\". 
Would you like to finish here?\\n\"+\"User: \")\n\n if (self.classification(user_input) in (\"ack\",\"affirm\")):\n state=\"exit\"\n else:\n state=\"init\"\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state == \"repeat\":\n try:\n user_input = self.statelog[len(self.statelog) - 3][0]\n state = self.statelog[len(self.statelog) - 3][1]\n except IndexError:\n print(\"Dialog Agent: \"+\"Nowhere to go back, starting again\\n\")\n state = \"init\"\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n else:\n print(\"Dialog Agent: \"+\"I could not understand that, could you phrase it differently?\")#statelog[len(statelog) + 1][0]\n state = self.statelog[len(self.statelog) - 2][1]\n self.dialogue(user_input, state, user_preferences)\n return", "def decision():\n return random.choice(['GoToNormal','GoToSleep'])", "async def tips(self, ctx):\r\n if ctx.guild.id == 445092370006933505:\r\n user = ctx.author\r\n data = self.config\r\n def check(n):\r\n return n.author == user and n.channel == ctx.channel\r\n try:\r\n\r\n await ctx.send(\"{} Which Archetype's tips do you need?(Cycle, Beatdown, Control, Siege), type anything else to stop \".format(ctx.author.mention))\r\n archatype = await self.bot.wait_for('message', timeout=60, check=check)\r\n final_archatype = archatype.content.lower()\r\n\r\n if final_archatype == \"beatdown\":\r\n tips = await data.get_raw(\"beatdown\")\r\n await ctx.send(\"Use reaction menu to navigate through tips\")\r\n await menu(ctx, tips, DEFAULT_CONTROLS)\r\n\r\n elif final_archatype == \"cycle\":\r\n tips = await data.get_raw(\"cycle\")\r\n await ctx.send(\"Use reaction menu to navigate through tips\")\r\n await menu(ctx, tips, DEFAULT_CONTROLS)\r\n\r\n elif final_archatype == \"control\":\r\n tips = await data.get_raw(\"control\")\r\n await ctx.send(\"Use reaction menu to navigate through tips\")\r\n await menu(ctx, tips, DEFAULT_CONTROLS)\r\n\r\n elif final_archatype == \"siege\":\r\n tips = await data.get_raw(\"siege\")\r\n await ctx.send(\"Use reaction menu to navigate through tips\")\r\n await menu(ctx, tips, DEFAULT_CONTROLS)\r\n\r\n else:\r\n raise UserEnd\r\n\r\n except asyncio.exceptions.TimeoutError:\r\n await ctx.send(\"Timeout...\")\r\n return\r\n except UserEnd:\r\n await ctx.send(\"Stopped!\")\r\n return\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "async def choose(ctx, *choices: str):\n await ctx.send(random.choice(choices))", "async def slots(self, ctx: Message):\n\t\tfinal = []\n\t\tfor i in range(5):\n\t\t\ta = random.choice([\n\t\t\t \":redDogeHouse:\", \":OrangeDogeHouse:\", \":PurpleDogeHouse:\",\n\t\t\t \":CyanDogeHouse:\", \":CoolHouse:\"\n\t\t\t])\n\n\t\t\tfinal.append(a)\n\n\t\tif final[0] == final[1] and final[1] == final[2]:\n\t\t\treturn await self.send(\n\t\t\t f\"{ctx.author.mention} Triple! 
You won!ㅤ •ㅤ {final[0]} | {final[1]} | {final[2]}\"\n\t\t\t)\n\n\t\telif final[0] == final[1] or final[0] == final[2] or final[2] == final[\n\t\t 1]:\n\t\t\treturn await self.send(\n\t\t\t f\"{ctx.author.mention} You won!ㅤ •ㅤ {final[0]} | {final[1]} | {final[2]}\"\n\t\t\t)\n\t\telse:\n\t\t\treturn await self.send(\n\t\t\t f\"{ctx.author.mention} You lost!ㅤ •ㅤ {final[0]} | {final[1]} | {final[2]}\"\n\t\t\t)", "async def roulette(self, ctx):\n choices = [\"This is the end of the world\", \"And I don't know what to put here\"]\n await ctx.send(random.choice(choices))", "def run_example():\n num_die_sides = 6\n hand = (1,2,5,5,5)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def decide_action(self):\t\t\t\t\t#defining the function to decide the action\n recognizer, audio = self.speech.listen_for_audio()\t\t#listening for the audio\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\t#storing the speech into variable as a text\n\n if speech is not None:\t\t#if speech is not recognized\n try:\n req = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\t\t#getting the wit.ait token and checking it\n print req.text\t\t\t#printing the text\n json_responce = json.loads(req.text)\t\t#printing the responce\n entities = None\t\t\t#inititaling the entities\n intent = None\t\t\t#initialising the intent\n if 'entities' in json_responce and 'Intent' in json_responce['entities']:\t#checking the the intents and entitites\n entities = json_responce['entities']\t\t#entities \n intent = json_responce['entities']['Intent'][0][\"value\"]\t#intents \n\n print intent\t#printing the intents\n if intent == 'greeting':\t#checking the intent type\n self.__text_action(self.nlg.greet()) #getting the function of the intent\n elif intent == 'snow white':\t\t#checking the intent type\n self.__text_action(self.nlg.snow_white())\t\t#getting the function of the intent\n elif intent == 'weather':\t\t#checking the intent type\n self.__weather_action(entities)\t#getting the function of the intent\n elif intent == 'news':\t\t\t#checking the intent type\n self.__news_action()\t#getting the function of the intent\n elif intent == 'maps':\t\t\t#getting the function of the intent\n self.__maps_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'holidays':\t\t#getting the function of the intent#checking the intent type\n self.__holidays_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'appearance':\t\t#getting the function of the intent#checking the intent type\n self.__appearance_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user status':\t\t#getting the function of the intent#checking the intent type\n self.__user_status_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user name':\t\t\t#getting the function of the intent#checking the intent type\n self.__user_name_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'personal status':\t\t#getting the function of the intent#checking the intent type\n self.__personal_status_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'joke':\t\t\t#getting the function of the 
intent#checking the intent type\n self.__joke_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'insult':\t\t#getting the function of the intent#checking the intent type\n self.__insult_action()\t#getting the function of the intent#checking the intent type\n return\t\t\t\t#retuning\n elif intent == 'appreciation':\t\t\t#getting the function of the intent#checking the intent type\n self.__appreciation_action()\t\t\t#getting the function of the intent#checking the intent type\n return\n elif intent == 'music':\t\t\t#getting the function of the intent#checking the intent type\n self.__music_action(music_file)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'navigation':\t\t\t#getting the function of the intent#checking the intent type\n self.__navigate_action()\n elif intent == 'tasks':\n self.__calender_events()\n\t\telif intent == 'guide':\n self.__guide()\n elif intent == 'web':\n self.__web()\n elif intent == 'video':\n self.__video()\n else: # No recognized intent\n self.__text_action(\"I'm sorry, I don't know about this yet.\")\n return\n\n except Exception as e:\n print \"Failed wit !\"\t\t\t#error message\n print(e)\t\t\t#printing the error\n traceback.print_exc()\n self.__text_action(\"I'm sorry, I couldn't understand what you mean !!\") #printing message\n return\t\t\t\t\n\n self.decide_action()", "def TURN_OPTIONS() -> tuple:\n return \"Hit me! (Draw another Card)\", \"Stand (End round, stop drawing)\"", "def care_color_HH(selection):\n acids = \"{} and (resn ASP or resn ASH or resn GLU or resn GLH)\".format(selection)\n bases = \"{} and (resn HIS or resn HIE or resn HID or resn HIP or resn ARG \\\n or resn LYS or resn LYN)\".format(selection)\n polars = \"{} and (resn CYS or resn CYX or resn GLN or resn ASN or resn SER \\\n or resn TYR or resn THR)\".format(selection)\n nonpolars = \"{} and (resn GLY or resn ALA or resn LEU or resn ILE or resn PHE \\\n or resn TRP or resn MET or resn PRO or resn VAL)\".format(selection)\n cmd.color(\"firebrick\", acids)\n cmd.color(\"deepteal\", bases)\n cmd.color(\"tv_orange\", polars)\n cmd.color(\"smudge\", nonpolars)\n util.cnc(selection)", "async def cmd_choose(self, args: Args, **_):\n response = \"From what you gave me, I believe `{}` is the best choice\".format(\n args[randint(0, len(args) - 1)]\n )\n return response", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def user_action():\n\t### This is the function that takes and executes the users choices\n\twhile battle_on:\n\t\tchoosing = True\n\t\twhile choosing:\n\t\t\tmenu(\"general\")\n\t\t\tanswer()\n\t\t\tif ans == \"attack\":\n\t\t\t\tattack(my_pokemon, enemy)\n\t\t\t\tcalc_hp(enemy, \"attack\")\n\t\t\t\tshow_hp(enemy)\n\t\t\t\tprint \" \"\n\t\t\t\treturn\n\t\t\telif ans == \"flee\":\n\t\t\t\tchance = uniform(0, 100)\n\t\t\t\tif chance > 90:\n\t\t\t\t\twin(\"flee\")\n\t\t\t\telse:\n\t\t\t\t\tprint \"You failed to 
escape!\"\n\t\t\t\t\treturn\n\t\t\telif ans == \"potion\":\n\t\t\t\tuse_potion(my_pokemon)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tprint \"i dont know what you mean :)\"\n\t\t\t\tprint \"lets try again!\"\n\t\t\t\tchoosing = True", "def taketurn(self):\n # get my options from the game\n opts = self.game.options()\n rec_opt = self._primestrat.recommend(opts, self.board)\n if rec_opt is not None:\n self.implementstrategy(rec_opt)\n else:\n super().taketurn()", "def user_choices() -> True:\n message_box(\"EasyGUI examples.\\n\\nThis is a message box.\\n\\t\\t\\tAuthor: XX\")\n\n # This is a 'clever' technique!\n # Define the strings you need to use in a dictionaty,\n # and associate each string to a function name\n all_choices = {'Run my\\nfunction': my_function,\n 'Select a\\nfile': select_file,\n 'Show the demo': eg.egdemo}\n\n # Use Gui to select a choice\n choice = eg.buttonbox(msg=\"Select and action\",\n title=EG_TITLE+': choice',\n choices=list(all_choices.keys()),\n image=os.path.join('Images', 'qm.png'))\n\n # This is the clever bit!! Run the choice as a function\n all_choices[choice]()\n return True", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def ChoiceColor(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def plugin(self,ctx):\n special_case = {\"Anime\":\"myanimelist\",\"Anti Raid\":\"antiraid\"}\n plugin_setting = await self.redis.hgetall(\"{}:Config:Cogs\".format(ctx.message.guild.id))\n embed = discord.Embed()\n cogs = self.bot.cogs.keys()\n for x in cogs:\n setting = \tu\"\\U0001F534\" #red\n if x in (\"Core\", \"Remindme\", \"Tools\", \"REPL\",\"Events\"): # A Owner's thing only.\n if ctx.message.author.id != self.bot.owner.id:\n continue\n setting = u\"\\U0001F535\" #blue\n if x.lower() in plugin_setting or special_case.get(x) in plugin_setting:\n setting = \tu\"\\U0001F535\" #blue\n embed.add_field(name = x,value = setting)\n if ctx.message.guild.me.colour.value:\n embed.colour = ctx.message.guild.me.colour\n\n embed.set_footer(text = \"{} = Disable | {} = Enable\".format(u\"\\U0001F534\",u\"\\U0001F535\"))\n await ctx.send(embed=embed)", "def get_blue():\n # return name of actor, grazing speed, self defense\n return 'Piggy', 2", "def is_rainbow(msg: str = 'I guess you are not my little pog champ :3'):\n\n async def check(ctx):\n rainbow = ctx.author.id == ctx.bot.owner_id\n if not rainbow:\n await ctx.send(msg)\n return rainbow\n\n return commands.check(check)", "async def rps(self, ctx, your_choice : RPSParser):\r\n author = ctx.message.author\r\n player_choice = your_choice.choice\r\n red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))\r\n cond = {\r\n (RPS.rock, RPS.paper) : False,\r\n (RPS.rock, RPS.scissors) : True,\r\n (RPS.paper, RPS.rock) : True,\r\n (RPS.paper, RPS.scissors) : False,\r\n (RPS.scissors, RPS.rock) : False,\r\n (RPS.scissors, RPS.paper) : True\r\n }\r\n\r\n if red_choice == player_choice:\r\n outcome = None # Tie\r\n else:\r\n outcome = cond[(player_choice, red_choice)]\r\n\r\n if outcome is True:\r\n await 
self.bot.say(\"{} You win {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n elif outcome is False:\r\n await self.bot.say(\"{} You lose {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n else:\r\n await self.bot.say(\"{} We're square {}!\"\r\n \"\".format(red_choice.value, author.mention))", "def ask_dialog(self, title=\"\", vars=[], help=\"\"):\n\t\tpass", "def sense_and_act(self):\n pass", "def ask(self, question):\n\n\t\t# If you're just trying to test voice detection, you can uncomment\n\t\t# the following 5 lines. Bobby will guess \"yellow flashlight\" and will prompt\n\t\t# you to correct him by saying \"blue flashlight\"\n\n\t\t# fake_answers = [\"no\", \"yes\", \"yes\", \"yes\", \"no\", \"yes\", \"yes\"]\n\t\t# global count\n\t\t# count += 1\n\t\t# print question\n\t\t# return fake_answers[count - 1]\n\n\t\t# self.say(question)\n\t\t# #starts listening for an answer\n\t\t# self.asr.subscribe(\"TEST_ASR\")\n\t\t# data = (None, 0)\n\t\t# while not data[0]:\n\t\t# \tdata = self.mem.getData(\"WordRecognized\")\n\t\t# #stops listening after he hears yes or no\n\t\t# self.asr.unsubscribe(\"TEST_ASR\")\n\t\t#\n\t\t# print data\n\t\t#\n\t\t# for word in self.yes_no_vocab:\n\t\t# \tfor syn in self.yes_no_vocab[word]:\n\t\t# \t\tif data[0] == syn:\n\t\t# \t\t\treturn word", "async def choose(self, ctx, *vargs):\n\n embed = discord.Embed(\n title=\"Choose\",\n description=\"Choices: {}\\nChosen: **{}**\".format(str(vargs), random.choice(vargs)),\n color=discord.Color.blurple()\n )\n await ctx.send(embed=embed)", "def choice1(choice, ghost):\n if choice == \"1\":\n if \"flashlight\" not in items:\n print_pause(\"The Railway station is really foggy\", 2)\n print_pause(\"You can see absolute nothing\", 2)\n print_pause(\"You walk on and find a flashlight\", 2)\n light_choice(ghost)\n else:\n print_pause(\n \"You already pasted this way\\nPlease choose another way!\", 2)\n logic(ghost)", "def AIguessing(lijst):\n\n global Code\n global allcombos\n\n\n AIguess = choice(lijst)\n\n print(f\"The original code was {Code}\")\n print(f\"my guess this time is {AIguess}, how did I do?\")\n while not feedbackgiven:\n correct = int(input(\"Write down how many colors are in the right spot: \"))\n semicorrect = int(input(\"Write down how many colors are correct but not in the right spot: \"))\n\n feedback = correct + semicorrect\n if feedback <= 4:\n return NewFeedbackSystem(AIguess, correct, semicorrect, lijst)\n else:\n print(\"please use numbers 1-4 where the total <= 4\")\n continue", "async def _info_heist(self, ctx):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n themes = await self.thief.get_guild_theme(guild)\r\n\r\n if config[\"Hardcore\"]:\r\n hardcore = \"ON\"\r\n else:\r\n hardcore = \"OFF\"\r\n\r\n # Theme variables\r\n theme = config[\"Theme\"]\r\n t_jail = themes[\"Jail\"]\r\n t_sentence = themes[\"Sentence\"]\r\n t_police = themes[\"Police\"]\r\n t_bail = themes[\"Bail\"]\r\n\r\n time_values = [config[\"Wait\"], config[\"Police\"],\r\n config[\"Sentence\"], config[\"Death\"]]\r\n timers = list(map(self.thief.time_format, time_values))\r\n description = [\"Heist Version {}\".format(self.version), \"Theme: {}\".format(theme)]\r\n footer = \"Heist was developed by Redjumpman for Red Bot v2.\\nUpdated to v3 by Malarne\"\r\n\r\n embed = discord.Embed(colour=0x0066FF, description=\"\\n\".join(description))\r\n embed.title = \"{} Heist Settings\".format(guild.name)\r\n embed.add_field(name=\"Heist Cost\", value=config[\"Cost\"])\r\n 
embed.add_field(name=\"Base {} Cost\".format(t_bail), value=config[\"Bail\"])\r\n embed.add_field(name=\"Crew Gather Time\", value=timers[0])\r\n embed.add_field(name=\"{} Timer\".format(t_police), value=timers[1])\r\n embed.add_field(name=\"Base {} {}\".format(t_jail, t_sentence), value=timers[2])\r\n embed.add_field(name=\"Death Timer\", value=timers[3])\r\n embed.add_field(name=\"Hardcore Mode\", value=hardcore)\r\n embed.set_footer(text=footer)\r\n\r\n await ctx.send(embed=embed)", "def strategy(self, game, args=()):", "def state_choose_do(cfg, app, win, events):", "def _sense_and_act(self):\n pass", "def getPetalColor():\n return input(\"What color do you want the petals to be?\")", "def happy_color(health):\n if health > 0.8:\n return 'g'\n if health > 0.6:\n return 'y'\n return 'r'", "def rules():\r\n messagebox.showinfo(\"Rules\", \"The rules of the game is as follows. \\nYou can choose between rock, paper and scissors. \\nRock beats scissors, Paper beats rock, and Scissors beats paper.\")", "def respond_from_waiting(self, message, tags):\n self.stance = None\n self.used_arguments = []\n\n # Use tags and message to determine user stance, then define bot's stance as the opposite\n # If user is neutral/has no opinion, the bot will randomly choose between pro and con\n\n if 'veganism' in tags or 'anti_vegan_stance' in tags or 'pro_vegan_stance' in tags: #we might wanna delete this part, as it is unnecessary, the conversation is already about veganism\n for stance in self.STANCES:\n # If user is pro-vegan, bot takes anti-vegan stance\n if 'pro_vegan_stance' in tags:\n self.stance = 'anti_vegan'\n # print(\"is in pro vegan stance\")\n return self.go_to_state('anti_vegan_stance')\n\n # Determine the first argument the bot will use, add to used_arguments\n #\n #return self.go_to_state('anti_vegan_stance')\n\n # If user is anti-vegan, bot takes pro-vegan stance\n elif 'anti_vegan_stance' in tags:\n self.stance = 'pro_vegan'\n # print(\"is in anti vegan stance\")\n return self.go_to_state('pro_vegan_stance')\n\n # If user is neutral, bot chooses randomly between pro and anti vegan stances\n else:\n # Choose stance randomly\n self.stance = random.choice(STANCES)\n\n if self.stance == 'pro_vegan':\n return self.go_to_state('pro_vegan_stance')\n else:\n return self.go_to_state('anti_vegan_stance')\n\n elif 'thanks' in tags:\n return self.finish('thanks')\n else:\n return self.finish('confused')", "async def _play_heist(self, ctx):\r\n author = ctx.message.author\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n theme = await self.thief.get_guild_theme(guild)\r\n crew = await self.thief.config.guild(guild).Crew()\r\n\r\n await self.thief.check_server_settings(guild)\r\n await self.thief.check_member_settings(author)\r\n\r\n cost = config[\"Cost\"]\r\n wait_time = config[\"Wait\"]\r\n prefix = ctx.prefix\r\n\r\n # Theme Variables\r\n t_crew = theme[\"Crew\"]\r\n t_heist = theme[\"Heist\"]\r\n t_vault = theme[\"Vault\"]\r\n\r\n outcome, msg = await self.thief.requirement_check(prefix, author, cost)\r\n\r\n if outcome == \"Failed\":\r\n return await ctx.send(msg)\r\n\r\n if not config[\"Planned\"]:\r\n await bank.withdraw_credits(author, cost)\r\n config[\"Planned\"] = True\r\n await self.thief.config.guild(guild).Config.set(config)\r\n crew = await self.thief.add_crew_member(author)\r\n await ctx.send(\"A {4} is being planned by {0}\\nThe {4} \"\r\n \"will begin in {1} seconds. 
Type {2}heist play to join their \"\r\n \"{3}.\".format(author.name, wait_time, ctx.prefix, t_crew, t_heist))\r\n await asyncio.sleep(wait_time)\r\n \r\n crew = await self.thief.config.guild(guild).Crew()\r\n\r\n if len(crew) <= 1:\r\n await ctx.send(\"You tried to rally a {}, but no one wanted to follow you. The \"\r\n \"{} has been cancelled.\".format(t_crew, t_heist))\r\n await self.thief.reset_heist(guild)\r\n else:\r\n await self.heist_game(ctx, guild, t_heist, t_crew, t_vault)\r\n\r\n else:\r\n await bank.withdraw_credits(author, cost)\r\n crew = await self.thief.add_crew_member(author)\r\n crew_size = len(crew)\r\n await ctx.send(\"{0} has joined the {2}.\\nThe {2} now has {1} \"\r\n \"members.\".format(author.display_name, crew_size, t_crew))", "async def choose_your_poison():\n return InteractionResponse(embed = Embed('Choose your poison'), components = CHOOSE_YOUR_POISON_ROW)", "def decide(self) :\n (self.futurX,self.futurY) = self.randomNextPos()\n if self.fishBreedTimeCPT == 0 :\n self.naissance = True\n self.fishBreedTimeCPT = self.fishBreedTime\n else :\n self.fishBreedTimeCPT = self.fishBreedTimeCPT - 1\n\n if self.env.grille[self.futurY][self.futurX] == None :\n self.bougera = True\n else :\n self.bougera = False\n\n self.update()", "async def randomChoice(self, ctx: commands.Context, *choices: str):\n if not choices:\n await ctx.reply(f\"Command failed - no arguments given.\\nEnter a sequence of arguments to choose from (you can use quotes for grouping).\", mention_author=False)\n elif len(choices)==1:\n await ctx.reply(f\"After some extremely randomized choosing from the one singular option that was given to choose from, the surprising result is:\\n{choices[0]}\", mention_author=False)\n else:\n await ctx.reply(f\"Randomly chosen result:\\n{random.choice(choices)}\", mention_author=False)", "def state_chosen_do(cfg, app, win, events):", "def opponent_hand(self):\r\n\r\n # 1 = rock, 2 = paper, 3 = scissors\r\n random_hand = random.randint(1, 3)\r\n\r\n # Slows down the pace of the game with pauses\r\n self.loading(0.5)\r\n\r\n if random_hand == 1:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: ROCK\"\r\n self.opp_rock = True\r\n print(\"Opponent chose Rock.\")\r\n\r\n elif random_hand == 2:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: PAPER\"\r\n self.opp_paper = True\r\n print(\"Opponent chose Paper.\")\r\n\r\n elif random_hand == 3:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: SCISSORS\"\r\n self.opp_scissors = True\r\n print(\"Opponent chose Scissors.\")\r\n\r\n # Clear the opponent hand entry box\r\n self.opp_hand_entry.delete(0, \"end\")\r\n\r\n # Insert the value of the randomized hand of the opponent\r\n self.opp_hand_entry.insert(0, opp_hand_value)", "def categorize(bot, update):\n query = update.message.text.lower()\n if \"are you\" in query:\n message = (\"I am many things, compadre. I am a man, I am military trained in the art of war \"\n \"by Sun Tzu. I am trained in philosophy by Marcus Aurelius. I am trained in the culinary arts \"\n \"by Gordon Ramsay. I am the night. I am... a Butler.\"\n )\n send(bot, update, message)\n return True\n elif \"cowsay\" in query:\n if \"fortune\" in query:\n cowsay_fortune(bot, update)\n else:\n cowsay(bot, update, update.message.text)\n return True\n elif \"fortune\" in query:\n fortune(bot, update)\n return True\n elif \"stats\" in query or \"statistics\" in query or \"weather\" in query or \"report\" in query:\n send(bot, update, \"You want the weather report boss? 
No problemo.\")\n if \"one week\" in query or \"1 week\" in query:\n send(bot, update, \"Pulling data for a week.\")\n df_one_day = tempmon.filter_last_one_week()\n elif \"one month\" in query or \"1 month\" in query:\n send(bot, update, \"Pulling data for a month.\")\n df_one_day = tempmon.filter_last_one_month()\n elif \"six months\" in query or \"6 months\" in query:\n send(bot, update, \"Pulling data for six months.\")\n df_one_day = tempmon.filter_last_six_months()\n else:\n send(bot, update, \"Pulling data for a day.\")\n df_one_day = tempmon.filter_last_one_day()\n df_one_day.set_index(keys=\"timestamp\",inplace=True)\n fig, axes = plt.subplots(nrows=4, ncols=1, figsize=(9, 20))\n df_one_day[\"temperature_h\"].plot(ax=axes[0])\n axes[0].set_title(\"Temperature (Humidity Based) [°C]\")\n df_one_day[\"temperature_p\"].plot(ax=axes[1])\n axes[1].set_title(\"Temperature (Pressure Based) [°C]\")\n df_one_day[\"humidity\"].plot(ax=axes[2])\n axes[2].set_title(\"Humidity [%]\")\n df_one_day[\"pressure\"].plot(ax=axes[3])\n axes[3].set_title(\"Pressure [millibar]\")\n plt.tight_layout()\n fig.savefig(\"stats.png\",dpi=220)\n bot.send_photo(\n chat_id=update.message.chat_id, \n photo=open(\"stats.png\",\"rb\"))\n return True\n elif \"headache\" in query or \"migraine\" in query:\n send(\n bot, \n update, \n \"Hmm, you want to know if you're going to get a headache or migraine today? Give me a moment, boss. Let me check my completely valid WebMD degree in neurology.\")\n df = tempmon.filter_last_one_day()\n min_pressure = df[\"pressure\"].min()\n max_pressure = df[\"pressure\"].max()\n message = \"The pressure varied from {:.3f} millibar to {:.3f} millibar in the last 24 hours.\".format(min_pressure, max_pressure)\n send(bot, update, message)\n message = \"Let me analyse the last one week.\"\n send(bot, update, message)\n df2 = tempmon.filter_last_one_week()\n end_time = df2[\"timestamp\"].max() - datetime.timedelta(minutes=24*60)\n df2 = df2.loc[df2.timestamp <= end_time]\n min_p = df2[\"pressure\"].min()\n max_p = df2[\"pressure\"].max()\n message = \"The pressure varied from {:.3f} millibar to {:.3f} millibar in the last week, not including the last 24 hours.\".format(min_p, max_p)\n send(bot, update, message)\n range_p_1d = max_pressure - min_pressure\n range_p_1w = max_pressure - min_pressure\n if math.isclose(range_p_1d, range_p_1w, abs_tol=1e-2) or (range_p_1d > range_p_1w):\n message = \"The pressure has varied a little too wildly today compared to this week in general. The range is {:.3f} millibar. You might get a headache.\".format(range_p_1d)\n else:\n message = \"The pressure variation today has been a little less compared to earlier in the week. 
You might not get a headache.\"\n send(bot, update, message) \n message = \"Uh, you know I don't have my MD yet, right boss?\"\n send(bot, update, message)\n\n elif \"temperature\" in query:\n df_one_day = tempmon.filter_last_one_day()\n message = \"Mean Temperature in the past 24h was \\n{:.4f}°C (Pressure based)\\n{:.4f} °C (Humidity based)\".format(df_one_day[\"temperature_p\"].mean(), df_one_day[\"temperature_h\"].mean())\n send(bot, update, message)\n return True\n elif \"humidity\" in query:\n df_one_day = tempmon.filter_last_one_day()\n message = \"Mean humidity in the past 24h was {:.4f}%\".format(df_one_day[\"humidity\"].mean())\n send(bot, update, message)\n return True\n elif \"pressure\" in query:\n df_one_day = tempmon.filter_last_one_day()\n message = \"Mean pressure in the past 24h was {:.4f} millibar\".format(df_one_day[\"pressure\"].mean())\n send(bot, update, message)\n return True\n elif \"xkcd\" in query:\n comic = xkcd.getRandomComic()\n response = \"[{}]({})\\n{}\".format(comic.title, comic.imageLink, comic.altText)\n bot.send_message(\n chat_id=update.message.chat_id, \n text=response,\n parse_mode=telegram.ParseMode.MARKDOWN)\n return True\n return None", "def get_user_input(self, game, hand, message, allowed_actions):\n if self.first_turn:\n hand = self.hands[0]\n if hand.cards == 2:\n card1, card2 = hand.cards\n if card1.get_value() == card2.get_value():\n return 'split'\n return 'double'\n else:\n return 'stand'", "async def choices(self, ctx, *, options):\n choices = options.split('-')\n choice = random.choice(choices)\n await ctx.send(f'My choice is\\\"{choice}\\\"')", "def hellraiser_weak():\r\n\r\n print(\"\\n\\nThe Hell Raiser pulls you back on the floor...\\n\\nLucifer trying to splash Holy water from jug kept on dining table...\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\n\\nYou are battling hard with devil to save your life...\")\r\n\r\n time.sleep(3)\r\n \r\n print(\"\\n\\nLucifer tries to splash water on devil's face!\")\r\n \r\n time.sleep(2)\r\n \r\n holywater = input(\"\\n\\nType 'splash' to splash holy water: \")\r\n\r\n if holywater.lower() == \"splash\": # Create condition and compare if input from player matches the word, \"SPLASH\"\r\n\r\n print(\"\\n\\nLucifer splashes water on devil...\\n\\n Hell Raiser - 'Lucifer, stop it! You are getting crazy, don't splash me,, I can't survive!'\")\r\n\r\n time.sleep(2)\r\n\r\n print(\"\\n\\nHell Raiser now utilises his next skill - Fireballs!\")\r\n time.sleep(3)\r\n\r\n print(\"\\n\\n Hell Raiser - 'If you really want to make this uglier, take this!!!' \\n\\n\\n ***FIREBALLS*** ***FIREBALLS*** ***FIREBALLS***\")\r\n\r\n time.sleep(2)\r\n\r\n print(\"\\n\\nHelp Lucifer activate his double shield\")\r\n\r\n shield = input(\"\\nType 'shield' to enable: \")\r\n\r\n if shield == \"shield\": #Comparing player's response with \"SHIELD\"\r\n\r\n print(f\"\\n\\n Lucifer - '{name.title()}, speak out CHRISTO, it will make him weak!'\")\r\n\r\n christo = input(\"\\nType 'christo' to retaliate against Hell Raiser: \")\r\n\r\n if christo.lower() == \"christo\":\r\n\r\n print(f\"\\n Lucifer - 'Great, {name.title()}!, I will now emitt High Frequency noises on him...'\")\r\n\r\n time.sleep(2)\r\n\r\n high_noise = input(\"\\nType 'high' to activate Lucifer's noise: \")\r\n\r\n if high_noise.lower() == \"high\": # Comparing player's response with HIGH.\r\n \r\n print(\"\\n\\n Hell Raiser - 'Aaahhh! Stop this please! Lucifer, this is exhaustive...stop this noise! 
I am shaking and getting incapacitated!'\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\nDevil's health deteriorates, he avoids further impact from Lucifer and vanishes again...\")\r\n\r\n time.sleep(3)\r\n\r\n # After Lucifer emits the noise, devil diappears. The player and Lucifer then heading to Blue room.\r\n \r\n\r\n print(\"\\nYou and Lucifer finally climb upstairs towards Blue Room.\")\r\n \r\n\r\n print(\"\\n\\t\\t\\t***STAIRCASE***\")\r\n\r\n time.sleep(3)\r\n\r\n staircase = \"\\n\\t\\t\\t\\t\\t _____ ***BLUE ROOM*** \\n\\t\\t\\t\\t\\t _|\\n\\t\\t\\t\\t _|\\n\\t\\t\\t\\t _|\\n\\t\\t\\t _|\\n\\t\\t\\t\\t _|\\n\\t **BLACK ROOM** _____|\"\r\n\r\n print(staircase)\r\n\r\n time.sleep(3)\r\n\r\n # On reaching the Blue room, they face another challenge to solve a riddle to enter the room.\r\n\r\n print(\"\\n\\nNow that you have reached Blue Room, there is a catch! \\nYou will have to solve riddle written on sticky note hung on doorknob.\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\n\\nSolve the riddle given below: \")\r\n\r\n # Creating blueroom_riddle variable to display the riddle question and get input from player\r\n\r\n blueroom_riddle = input(\"\\n\\n\\tWhat Demands an Answer, But Ask no Questions?: \")\r\n\r\n if blueroom_riddle == \"telephone\": # Checking if player enters \"telephone\" as the answer\r\n\r\n print(f\"\\n\\n{blueroom_riddle.title()} is correct! You have now entered the room.\")\r\n \r\n\r\n ## If written TELEPHONE, they enter room and witness the Ancient Green Glass Box.\r\n \r\n\r\n print(\"\\n\\nAs you and Lucifer enter room, you are amazed to see the Ancient Green Glass Box resting on the floor.\")\r\n\r\n # Creating a variable to represent the box.\r\n\r\n ancient_box = \"\\n\\n\\t\\t\\t ***GREEN BOX*** \\n\\t\\t\\t\\t ___________\\n\\t\\t\\t\\t| |\\n\\t\\t\\t\\t| |\\n\\t\\t\\t\\t|___________|\"\r\n\r\n # Create a variable to represent floor.\r\n \r\n floor = \"\\n\\t\\t\\t\\t------------- \\n\\t\\t\\t\\t FLOOR \\n\\t\\t\\t\\t-------------\"\r\n\r\n # Print both.\r\n\r\n print(ancient_box)\r\n\r\n print(floor)\r\n\r\n time.sleep(3)\r\n\r\n ## The player meets the witch, Kijo, who demands to answer her 3 questions from the player to retain the box and win the game.\r\n \r\n print(\"\\nAlthough the box is at front of you, you encounter a witch, named Kijo.\")\r\n \r\n time.sleep(5)\r\n\r\n # Kijo welcoming the player.\r\n\r\n print(f\"\\n\\n Kijo - 'Welcome to the Blue room, {name.title()}! You proved to be a valiant human to reach this far.\\n\\n\\tHowever, you will have to answer 3 of my questions to get to the box.'\")\r\n\r\n time.sleep(5)\r\n\r\n # Kijo's conditions for each riddle...\r\n \r\n print(\"\\n\\nKijo will ask you three riddles... \\n\\nYou have 3 attempts to answer first riddle, 2 for second, and only 1 for the third.\\n\\nEach correct answer will bring you 3 steps closer to the box.\")\r\n\r\n time.sleep(5)\r\n\r\n \r\n print(\"\\n\\nBe mindful that failure to give correct answer at any stage will force you to get locked out of room forever. 
\\n\\nYou will not get the box and the game will be over.\")\r\n\r\n time.sleep(5)\r\n\r\n # Creating input statement for player to answer the questions\r\n\r\n yes = input(\"\\nType yes if ready to answer: \")\r\n\r\n if yes.lower() == \"yes\":\r\n\r\n ## First riddle:\r\n\r\n print(f\"\\nKijo - 'Okay, {name.title()}!, here is your first question:'\")\r\n\r\n guessestaken = 0 # Number of guesses taken by player is stored here.\r\n\r\n # Create the variable to print the first question\r\n\r\n firstriddle = print(\"\\nI Am Heavy And Hard To Pick Up, But Backwards I Am Not. What Am I?\")\r\n \r\n \r\n\r\n while guessestaken < 3: # Create While loop - the condition states that unless the player gives write answer to question,\r\n # offer 3 attempts\r\n \r\n\r\n print(\"\\nGive Your Answer\\n\")\r\n \r\n answer = str(input()) # The variable created for player's input. This input stores player's answer and convert into string.\r\n\r\n guessestaken = guessestaken + 1 # Instruct the system to update the guesses taken by the player.\r\n \r\n\r\n # Creating conditional statements for player's answer.\r\n\r\n if answer.lower() == \"ton\":\r\n\r\n # If he answers \"TON\", then print the below statement.\r\n\r\n print(f\"\\n\\n{answer.title()} is correct! You move 3 steps closer to box. Solve next question.\")\r\n\r\n time.sleep(3)\r\n\r\n # After successfully completing first question, head on to second question:\r\n \r\n\r\n ## Second riddle:\r\n\r\n # Kijo appreciating the player:\r\n\r\n print(f\"\\n\\n Kijo - 'Well done, {name.title()}! Here is your next question:'\")\r\n\r\n nextguess = 0 # Number of guesses taken by player for the second question's answer\r\n\r\n # Print the second question:\r\n\r\n secondriddle = print(\"\\nWhat work of writing can one never finish?\")\r\n \r\n\r\n while nextguess < 2: # Creating While loop for second question, this time with 2 attempts.\r\n\r\n print(\"\\nGive Your Answer\\n\")\r\n\r\n secondanswer = str(input()) # Create input for second answer\r\n\r\n nextguess = nextguess + 1 # Update the guesses\r\n\r\n # Create Conditional Statements for Second Question:\r\n \r\n\r\n if secondanswer.lower() == \"autobiography\": # If player's answer is \"AUTOBIOGRAPHY', execute following:\r\n \r\n print(f\"\\n\\n{secondanswer.title()} is correct! You move 3 steps more closer to box. Solve last question.\")\r\n\r\n time.sleep(3)\r\n\r\n # If successful, then head on to last question:\r\n\r\n ## Third riddle:\r\n\r\n print(f\"\\n\\n Kijo - 'Great, {name.title()}! 
Solve the final question to retain the box:'\")\r\n\r\n finalguess = 0 # Number of guesses taken by player for final question's answer\r\n\r\n # end = '' used to continue printing the next lines of strings together in console.\r\n\r\n print(\"\\n\\n\\t\\t CAN YOU OPEN THIS LOCK ?\", end='')\r\n print(\" \\n\\n\\n\\t\\t 206: Two digits are right but both are in the wrong place\", end='')\r\n print(\" \\n\\n\\t\\t 738: All Digits are wrong\", end='')\r\n print(\" \\n\\n\\t\\t 380: One digit is right but in the wrong place\",end='')\r\n print(\" \\n\\n\\t\\t 682: One digit is right and in its place\", end='')\r\n print(\" \\n\\n\\t\\t 614: One digit is right but in the wrong place\", end='')\r\n\r\n while finalguess < 1: # Create While loop with only 1 attempt...\r\n\r\n print(\"\\nGive Your Answer\\n\")\r\n\r\n finalanswer = str(input())\r\n\r\n finalguess = finalguess + 1\r\n\r\n if finalanswer.lower() == \"042\": # Check and Compare player's answer with \"042\".\r\n \r\n\r\n print(f\"\\n\\n{finalanswer.title()} is absolutely correct!\")\r\n \r\n time.sleep(3)\r\n \r\n print(f\"\\n\\n Kijo - 'Congratulations, {name.upper()}!!! You finally were able to retain the box!!! \\n\\n\\t\\t***YOU WIN :)***\")\r\n\r\n sys.exit() # Exit the game.\r\n\r\n \r\n\r\n if finalanswer.lower() != \"042\": # Creating Conditions if player's answer for third question does not match \"042\"\r\n\r\n # Print following statements and end the game.\r\n \r\n print(\"\\nSorry, you did not give the correct answer :(\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\nYou are locked out of room! \\n\\nGave Over!\")\r\n\r\n sys.exit()\r\n\r\n \r\n\r\n if secondanswer.lower() != \"autobiography\": # Creating similar Condition for Second Question, if player's answer does not match \"AUTOBIOGRAPHY\"\r\n\r\n # Print the following and end the game.\r\n \r\n print(\"\\nSorry, you did not give the correct answer :(\")\r\n\r\n time.sleep(3)\r\n \r\n print(\"\\nYou are locked out of room! \\n\\nGame Over!\")\r\n\r\n sys.exit()\r\n \r\n\r\n if answer.lower() != \"ton\": # Finally, creating the Condition for First Question if player's answer not matched with \"TON\"\r\n\r\n # Print the following statements and end the game\r\n \r\n print(\"\\nSorry, you did not give the correct answer :(\")\r\n\r\n time.sleep(3)\r\n\r\n print(\"\\nYou are locked out of room! \\n\\nGame Over!\")\r\n\r\n sys.exit()\r\n \r\n else: # If to open Blue room, the player writes answer other than \"TELEPHONE\", restart the game.\r\n\r\n print(\"You gave wrong answer, sorry but you will have to restart the game :(\")\r\n\r\n sys.exit()", "def user(num):\n while True:\n print(\"Option: {}\".format(num))\n\n line = input()\n\n try:\n if line[0] == 'h':\n print(help_message)\n else:\n white, black = map(int, line.split())\n return white, black\n except:\n print(invalid_option.format(line))", "async def thinking(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"thinking\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "async def best():\n await bot.say('Nargacuga is the best Monster. 
Are you casual?')", "def on_ur_choose_ok_btn_clicked(self):\n ur_type = self.ur_choose_box.currentText()\n self.ur.set_UR_ROBOT(ur_type)\n self.set_ur_info_txt(\"set UR type: \" + ur_type )", "def hints(s):\n if s == 'hello':\n # string, color, bold\n return (' World', 35, False)\n return None", "def ask_player_config(question: str, player: Player) -> BrainType:\n player_type = ask_question(question, [\n [\"Human\", \"Min-Max Easy\", \"Alpha-Beta Easy\"],\n [None, \"Min-Max Medium\", \"Alpha-Beta Medium\"],\n [None, \"Min-Max Expert\", \"Alpha-Beta Expert\"]\n ])\n\n estimation = None \n if player_type != \"Human\":\n estimation = ask_question(\"Choose Estimation\", [\n [\"simple\", \"advanced\"]\n ])\n \n global is_DOG_AI, is_RAB_AI\n if player_type != 'Human':\n if player == S_DOG:\n is_DOG_AI = True\n else:\n is_RAB_AI = True\n\n return getBrain(player_type, estimation)", "def get_user_input(self, game, hand, message, allowed_actions):\n return 'hit'", "def decide_action(self):\n recognizer, audio = self.speech.listen_for_audio()\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\n\n if speech is not None:\n try:\n requests.get(\"http://localhost:8080/clear\")\n if 'monica' in speech or 'Monica' in speech or 'monika' in speech or \"Monika\" in speech:\n######## EMAIL ###########\n if 'email' in speech or 'Email' in speech:\n\t problems = sendemail(from_addr = \"[email protected]\",\n to_addr_list = ['[email protected]'],\n \t cc_addr_list = ['[email protected]'],\n subject = 'Class notes',\n message = 'hello',\n login = 'ad9001055',\n password = 'dubey!@#$') \n print problems\n return\n######################\n r = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\n print r.text\n json_resp = json.loads(r.text)\n entities = None\n intent = None\n if 'entities' in json_resp and 'Intent' in json_resp['entities']:\n entities = json_resp['entities']\n intent = json_resp['entities']['Intent'][0][\"value\"]\n\n print intent\n if intent == 'greeting':\n self.__text_action(self.nlg.greet())\n elif intent == 'weather':\n self.__weather_action(entities)\n elif intent == 'maps':\n self.__maps_action(entities)\n elif intent == 'appreciation':\n self.__appreciation_action()\n return\n else: # No recognized intent\n print speech\n #self.__text_action(\"I am sorry, I dont know about that yet\");\n # elif speech == 'kannada':\n # target_language = \"kn\"\n # elif speech == 'urdu':\n # target_language = \"ur\"\n # elif speech == 'tamil':\n # target_language = \"ta\"\n # elif speech == 'bengali':\n # target_language = \"bn\"\n#TRANSALATE\n else:\n t_r = requests.get('https://translate.yandex.net/api/v1.5/tr.json/translate?key=trnsl.1.1.20161210T135343Z.01cdf9d8fa100c94.c04eb36979d30f2e792953e790efc391db86ed80&text=%s&lang=en-%s&format=plain&' % (speech, target_language) )\n print t_r.text\n tr_json_resp = json.loads(t_r.text)\n final_string = tr_json_resp[\"text\"][0]\n self.__text_action(final_string)\n target = open(notes_file_name, 'ab+')\n target.write(speech)\n target.write(\"\\n\")\n target.close()\n#TRANSALTE\n except Exception as e:\n print \"Failed wit!\"\n print(e)\n traceback.print_exc()\n #self.__text_action(\"I'm sorry, I couldn't understand what you meant by that\")\n return\n #self.decide_action()", "def run_im_bored():\n \n greet_user()\n \n bored = True\n \n while bored:\n generate_suggestion()\n bored = ask_to_continue()", "def 
GoTo(self):\n if self.state == 'normal':\n return self.backUser()\n \n print(r\"\"\"Please enter a specific color to reach the desired room:\n\n - blue -> entrance\n - red -> closet\n - green -> living room\n - yellow -> kitchen\n - magenta -> bathroom\n - black -> bedroom\n \"\"\")\n\n color = raw_input('Color: ')\n if color in self.color:\n self.msg_play.play = False\n self.msg_play.color = color\n self.play_pub.publish(self.msg_play)\n rospy.loginfo(\"color sent\")\n self.state = ''\n else:\n print('Command Unknown') \n return self.GoTo()", "def pick_action(self, equity, to_call, pot_odds):\n # action to us: check or bet\n if to_call == 0:\n # lock hands - 1/3 of the time make a small bet instead of a big one\n if equity > 90 and self.r_test(0.33, 'lock_trap'):\n self.make_bet(self.minimum_bet(\"trap1\"))\n elif equity > 65 or (equity > 40 and self.r_test(0.03, 'c1')):\n self.make_bet(self.big_raise(\"R1\"))\n elif equity > 55 or self.r_test(0.02, 'c2'):\n self.make_bet(self.minimum_bet(\"R2\"))\n else:\n self.bot.check()\n # TODO: combine these and make them aware of button\n # use pot odds to call/bet/fold\n else:\n return_ratio = equity / pot_odds\n self.bot.log(\" return ratio={:.3f}\".format(return_ratio))\n if equity > 70 or (equity > 40 and self.r_test(0.03, 'po1')):\n self.make_bet(self.big_raise(\"R3\"))\n elif to_call < self.data.big_blind and \\\n (equity > 55 or self.r_test(0.03, 'po2')):\n # small preflop raise from SB, get more money into the pot\n self.make_bet(self.minimum_bet(\"R4\"))\n elif return_ratio > 1.25:\n self.bot.log(\" return ratio > 1.25, calling {}\".format(to_call))\n self.bot.call(to_call)\n elif return_ratio > 1 \\\n and MathUtils.percentage(to_call, self.our_stack()) < 10:\n self.bot.log(\" return ratio > 1 and bet is small, calling {}\"\n .format(to_call))\n self.bot.call(to_call)\n else:\n self.bot.fold()", "async def phrase(self, ctx):\n await self.heleus.send_command_help(ctx)", "async def _spia(self,ctx: commands.Context):\n responses = ['✈️Sono un fottuto aereo✈️', 'scusate mi stanno chiamando📞','🔇🎧','🎶Bitches along my dick 🎶']\n await ctx.send(random.choice(responses))", "async def wouldyourather(message: discord.Message, opt: options=None):\n # If there are no options, the bot will ask the questions (if there are any to choose from)\n if opt is None:\n assert message.channel.id not in sessions, \"**A would you rather session is already in progress.**\"\n sessions.add(message.channel.id)\n\n assert db.data[\"questions\"], \"**There are ZERO questions saved. 
Ask me one!**\"\n\n question = random.choice(db.data[\"questions\"])\n choices = question[\"choices\"]\n await client.say(message, \"Would you rather **{}** or **{}**?\".format(*choices))\n\n timeout = db.data[\"timeout\"]\n replied = []\n\n # Wait for replies from anyone in the channel\n while True:\n reply = await client.wait_for_message(timeout=timeout, channel=message.channel,\n check=lambda m: m.author not in replied)\n # Break on timeout\n if reply is None:\n break\n\n # Check if the choice is vlaid\n choice = get_choice(choices, reply.content)\n if choice is None:\n continue\n\n # Register that this author has replied\n replied.append(reply.author)\n\n # Update the answers in the DB\n # We don't care about multiples, just the amount (yes it will probably be biased)\n question[\"answers\"][choice] += 1\n\n name = reply.author.display_name\n response = random.choice(db.data[\"responses\"]).format(name=name, NAME=name.upper(), choice=choices[choice])\n await client.say(message, response)\n\n # Say the total tallies\n await client.say(message, \"A total of {0} would **{2}**, while {1} would **{3}**!\".format(\n *question[\"answers\"], *choices))\n db.save()\n sessions.remove(message.channel.id)\n\n # Otherwise, the member asked a question to the bot\n else:\n db.data[\"questions\"].append(dict(\n choices=list(opt),\n answers=[0, 0]\n ))\n db.save()\n\n answer = random.choice(opt)\n await client.say(message, \"**I would {}**!\".format(answer))", "def type_determine(self):\n\n if self.data_type == \"ECG\" or self.data_type == \"ENR\":\n self.curve_channel2 = self.ECGWinHandle.plot(self.display_channel2, pen=self.pen)\n self.curve_channel1 = self.RespirationWinHandle.plot(self.display_channel1, pen=self.pen)\n self.two_channel = True\n if self.data_type == \"ECG\":\n self.channel1_type = \"RESP\"\n self.channel2_type = \"ECG\"\n else:\n self.channel1_type = \"RESP\"\n self.channel1_type = \"ECG\"\n else:\n self.curve_channel2 = self.PulseWaveWinHandle.plot(self.display_channel2, pen=self.pen)\n self.curve_channel1 = None\n self.two_channel = False\n self.channel2_type = \"PULSE\"", "async def hello(self, ctx):\n await ctx.send(random.choice(self.greetings))", "def hook_greet(self):\n ui.greet()", "def choose_option(friendly,enemy,opt1=\"Fight\",opt2=\"Bag\",opt3=\"Pokemon\",opt4 = \"Run\"):\n background_color = blit_background()[1]\n blit_friendly(friendly)\n blit_enemy(enemy)\n blit_health(friendly,enemy)\n pygame.display.update()\n pause(friendly,enemy,3) #to stop the click from 1st menu selecting option in second\n mouse_pos = 0,0\n while True:\n event_check(False, friendly,enemy)\n blit_background()\n opt_1 = pygame.draw.rect(screen,((background_color)),(60,540,300,70))\n blit_text(opt1,(70,545))\n opt_3 = pygame.draw.rect(screen,(background_color),(60,615,300,70))\n blit_text(opt2,(70,620))\n opt_2 = pygame.draw.rect(screen,(background_color),(360,540,300,70))\n blit_text(opt3,(370,545))\n opt_4 = pygame.draw.rect(screen,(background_color),(360,615,300,70))\n blit_text(opt4,(370,620))\n mouse_pos = get_click()\n blit_friendly(friendly)\n blit_enemy(enemy)\n blit_health(friendly,enemy)\n blit_text(\"What will you do?\",(800,580))\n pygame.display.update()\n if opt_1.collidepoint(mouse_pos):\n option = 1\n break\n elif opt_2.collidepoint(mouse_pos):\n option = 2\n break\n elif opt_3.collidepoint(mouse_pos):\n option = 3\n break\n elif opt_4.collidepoint(mouse_pos):\n option = 4\n break\n pygame.display.update()\n return option", "def choice(self):\n method = input(\"Enter e for encipher 
or d for decipher: \")\n if method == 'e':\n self.encipher()\n elif method == 'd':\n self.decipher()\n else:\n print(\"Invalid option! Learn to type...\")", "def get_user_input(arg_pair: EviPair):\n global HUMAN_CORRECT_PRED\n\n while True:\n try:\n choice = int(raw_input())\n\n if choice in [1,2]:\n\n if choice == arg_pair.label:\n HUMAN_CORRECT_PRED += 1\n\n break\n else:\n print(WRONG_INPUT)\n except ValueError:\n print(WRONG_INPUT)\n\n return choice", "async def house(self):\n\t\tcolors = {\n\t\t\t'Brown': [1, 3], 'Light Blue': [6, 8, 9],\n\t\t\t'Pink': [11, 13, 14], 'Orange': [16, 18, 19],\n\t\t\t'Red': [21, 23, 24], 'Yellow': [26, 27, 29],\n\t\t\t'Green': [31, 32, 34], 'Dark Blue': [37, 39]\n\t\t}\n\t\thouseable = []\n\t\tfor color in colors:\n\t\t\t#all owned by the current player\n\t\t\tif not all(self.ownedby[prop] == self.p for prop in colors[color]):\n\t\t\t\tcontinue\n\t\t\t#no props are mortgaged\n\t\t\tif any(self.ismortgaged[prop] for prop in colors[color]):\n\t\t\t\tcontinue\n\t\t\thouseable.append(color)\n\t\tif not houseable:\n\t\t\treturn 'You do not have any properties that are eligible for houses.\\n'\n\t\tmsg = ''\n\t\twhile True:\n\t\t\tmsg += '```\\nid price color\\n'\n\t\t\ti = 0\n\t\t\tfor color in houseable:\n\t\t\t\tmsg += '{:2} {:5d} {}\\n'.format(i, HOUSEPRICE[colors[color][0]], color)\n\t\t\t\ti += 1\n\t\t\tmsg += (\n\t\t\t\t'```Type the ID of the color group you want to manage.\\n'\n\t\t\t\tf'You have ${self.bal[self.p]}\\n`d`: Done'\n\t\t\t)\n\t\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\t\tawait self.ctx.send(msg)\n\t\t\tchoice = await self.bot.wait_for(\n\t\t\t\t'message',\n\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\tcheck=lambda m: (\n\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\tand m.content.lower() in [str(x) for x in range(len(houseable))] + ['d']\n\t\t\t\t)\n\t\t\t)\n\t\t\tchoice = choice.content.lower()\n\t\t\tif choice == 'd':\n\t\t\t\tbreak\n\t\t\tchoice = int(choice)\n\t\t\tprops = colors[houseable[choice]]\n\t\t\t#start off with the current values\n\t\t\tnew_values = []\n\t\t\tfor a in props:\n\t\t\t\tnew_values.append(self.numhouse[a])\n\t\t\tmsg = ''\n\t\t\twhile True:\n\t\t\t\tmsg += '```\\nid numh name\\n'\n\t\t\t\ti = 0\n\t\t\t\tfor a in props:\n\t\t\t\t\tmsg += '{:2} {:4} {}\\n'.format(i, new_values[i], TILENAME[a])\n\t\t\t\t\ti += 1\n\t\t\t\tmsg += (\n\t\t\t\t\t'```Type the ID of the property you want to change.\\n'\n\t\t\t\t\t'`c`: Confirm\\n`e`: Exit without changing'\n\t\t\t\t)\n\t\t\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\t\t\tawait self.ctx.send(msg)\n\t\t\t\tchoice = await self.bot.wait_for(\n\t\t\t\t\t'message',\n\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t\tand m.content.lower() in [str(x) for x in range(len(props))] + ['c', 'e']\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\tchoice = choice.content.lower()\n\t\t\t\tif choice == 'e':\n\t\t\t\t\tmsg = ''\n\t\t\t\t\tbreak\n\t\t\t\tif choice == 'c':\n\t\t\t\t\tif max(new_values) - min(new_values) > 1:\n\t\t\t\t\t\tmsg = 'That is not a valid house setup.'\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttest = self.numhouse[:] \n\t\t\t\t\tfor a in range(len(new_values)):\n\t\t\t\t\t\ttest[props[a]] = new_values[a]\n\t\t\t\t\thouseLimit = await self.cog.config.guild(self.ctx.guild).houseLimit()\n\t\t\t\t\ttotal_houses = 
sum(x for x in test if x in (1, 2, 3, 4))\n\t\t\t\t\tif total_houses > houseLimit and houseLimit != -1:\n\t\t\t\t\t\tmsg = (\n\t\t\t\t\t\t\t'There are not enough houses for that setup.'\n\t\t\t\t\t\t\tf'\\nMax houses: `{houseLimit}`\\nRequired houses: `{total_houses}`\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\thotelLimit = await self.cog.config.guild(self.ctx.guild).hotelLimit()\n\t\t\t\t\ttotal_hotels = sum(1 for x in test if x == 5)\n\t\t\t\t\tif total_hotels > hotelLimit and hotelLimit != -1:\n\t\t\t\t\t\tmsg = (\n\t\t\t\t\t\t\t'There are not enough hotels for that setup.'\n\t\t\t\t\t\t\tf'\\nMax hotels: `{hotelLimit}`\\nRequired houses: `{total_hotels}`\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tcontinue \n\t\t\t\t\tchange = 0\n\t\t\t\t\tfor a in range(len(new_values)):\n\t\t\t\t\t\tchange += new_values[a] - self.numhouse[props[a]]\n\t\t\t\t\tif change == 0:\n\t\t\t\t\t\tmsg = 'No houses were changed.\\n'\n\t\t\t\t\t\tbreak\n\t\t\t\t\tprice = abs(change) * HOUSEPRICE[props[0]]\n\t\t\t\t\tif price > self.bal[self.p] and change > 0:\n\t\t\t\t\t\tmsg = 'You cannot afford to buy that many houses.\\n'\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif abs(change) == 1:\n\t\t\t\t\t\tplural = ''\n\t\t\t\t\telse:\n\t\t\t\t\t\tplural = 's'\n\t\t\t\t\tif change > 0:\n\t\t\t\t\t\tawait self.ctx.send(\n\t\t\t\t\t\t\tf'Are you sure you want to buy {change} house{plural}? (y/n) '\n\t\t\t\t\t\t\tf'It will cost ${price} at ${HOUSEPRICE[props[0]]} per house.'\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tawait self.ctx.send(\n\t\t\t\t\t\t\tf'Are you sure you want to sell {abs(change)} house{plural}? (y/n) '\n\t\t\t\t\t\t\tf'You will get ${price // 2} at '\n\t\t\t\t\t\t\tf'${HOUSEPRICE[props[0]] // 2} per house.'\n\t\t\t\t\t\t)\n\t\t\t\t\tchoice = await self.bot.wait_for(\n\t\t\t\t\t\t'message',\n\t\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t\t\tand m.content.lower() in ('y', 'yes', 'n', 'no')\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tchoice = choice.content[0].lower()\n\t\t\t\t\tif choice == 'n':\n\t\t\t\t\t\tmsg = ''\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tfor a in range(len(new_values)):\n\t\t\t\t\t\tself.numhouse[props[a]] = new_values[a]\n\t\t\t\t\tif change > 0:\n\t\t\t\t\t\tself.bal[self.p] -= price\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.bal[self.p] += price // 2\n\t\t\t\t\tmsg = f'You now have ${self.bal[self.p]}.'\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tchoice = int(choice)\n\t\t\t\t\tawait self.ctx.send(\n\t\t\t\t\t\tf'How many houses do you want on {TILENAME[props[choice]]}?\\n`c`: Cancel'\n\t\t\t\t\t)\n\t\t\t\t\tvalue = await self.bot.wait_for(\n\t\t\t\t\t\t'message',\n\t\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t\t\tand m.content.lower() in [str(x) for x in range(6)] + ['c']\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tvalue = value.content.lower()\n\t\t\t\t\tif value == 'c':\n\t\t\t\t\t\tmsg = ''\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tvalue = int(value)\n\t\t\t\t\tnew_values[choice] = value\n\t\t\t\t\tmsg = ''\n\t\treturn ''", "async def horse_race(self, ctx, arg:int, bets=bets):\n # betting boilerplate checks to be inserted later\n if verification.is_user(ctx.author.id):\n if verification.has_funds(ctx.author.id, arg) and not bets:\n embed = discord.Embed(color=0xf0eec0, title=\"Horse Race 
🏇\")\n embed.add_field(name=\"Betting Phase\", value=\"React with a suit to bet on it. The bet amount is set by the user who started the game. You may only bet on one suit.\", inline=False)\n embed.add_field(name=\"Betting closes in 30 seconds!\", value=f\"Bets are <:chip:657253017262751767> **{arg}** chips per person\", inline=False)\n betting_phase = await ctx.send(content=None, embed=embed)\n racers = ['♦️', '♥️', '♠️', '♣️']\n for reaction in racers:\n await betting_phase.add_reaction(emoji=reaction)\n # Waits 30 seconds before locking in the bets\n await asyncio.sleep(30)\n await betting_phase.clear_reactions()\n standings = {'♦️':0, '♥️':0, '♠️':0, '♣️':0}\n embed.add_field(name=\"Betting has been closed.\", value=\"The race is starting now!\", inline=False)\n status = await ctx.send(content=None, embed=embed)\n # If the user who reacted does not have the funds or is not registered\n for bet in bets:\n if not verification.is_user(bet) or not verification.has_funds(bet, arg):\n del bets[bet]\n await ctx.send(f\"{self.client.get_user(bet)} does not have enough chips to bet on this race\")\n pot = arg * len(bets)\n while standings[max(standings.items(), key=operator.itemgetter(1))[0]] < 12:\n embed.clear_fields()\n standings[random.choice(racers)] += 1\n racetrack = \"🟨🟨🟨🟨🟨🟨🟨🟨🟨🟨🏁\"\n embed.add_field(name=\"\\u200b\", value=f\"{racetrack[:standings['♦️']]+ '<:Ad:656572046658109461>' +racetrack[standings['♦️']:]}\", inline=False)\n embed.add_field(name=\"\\u200b\", value=f\"{racetrack[:standings['♥️']]+ '<:Ah:656572070737608734>' +racetrack[standings['♥️']:]}\", inline=False)\n embed.add_field(name=\"\\u200b\", value=f\"{racetrack[:standings['♠️']]+ '<:As:656572090610090004>' +racetrack[standings['♠️']:]}\", inline=False)\n embed.add_field(name=\"\\u200b\", value=f\"{racetrack[:standings['♣️']]+ '<:Ac:656572024185159694>' +racetrack[standings['♣️']:]}\", inline=False)\n await status.edit(embed=embed)\n winners = dict(filter(lambda elem: elem[1] == max(standings.items(), key=operator.itemgetter(1))[0], bets.items()))\n losers = dict(filter(lambda elem: elem[1] != max(standings.items(), key=operator.itemgetter(1))[0], bets.items()))\n embed.clear_fields\n embed.add_field(name=\"Winner!\", value=f\"{max(standings.items(), key=operator.itemgetter(1))[0]} won the race!\", inline=False)\n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n sql = (\"UPDATE main SET jacks = ? 
WHERE user_id = ?\")\n if len(winners) != 0:\n for winner in winners:\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {winner}')\n result = cursor.fetchone()\n val = (result[1] - arg + int(pot/len(winners)), winner)\n cursor.execute(sql, val)\n embed.add_field(name=\"Payout\", value=f\"{self.client.get_user(winner).mention} has won <:chip:657253017262751767> **{int(pot/len(winners))}** chips\", inline=False)\n else:\n embed.add_field(name=\"Payout\", value=\"Nobody won this time around...\", inline=False)\n await ctx.send(content=None, embed=embed)\n if len(losers) != 0:\n for loser in losers:\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {loser}')\n result = cursor.fetchone()\n val = (result[1] - arg, loser)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n bets.clear()\n else:\n await ctx.send(\"An error occurred, either you do not have enough chips for this race or a race is already in progress.\")\n else:\n await ctx.send(\"You must register before you can use this command\")", "def handle_bot_command(text, user, channel):\n tokens = text.split(\" \")\n command_index = tokens.index(AT_BOT) + 1\n\n if len(tokens) <= command_index:\n return ContinueType.STANDARD\n\n if len(tokens) >= 2:\n command = tokens[command_index].upper()\n\n if command == \"IGNORE\":\n return ContinueType.IGNORE\n elif command == \"MINE\":\n return ContinueType.USER_ONLY\n elif command == \"SKIPME\":\n return ContinueType.GROUP_ONLY\n elif command == \"HELP\":\n send_slack(RADIOBOT_HELP_MSG, channel)\n return ContinueType.IGNORE\n elif command == \"ALBUM\":\n return ContinueType.ALBUM_LIST\n elif command == \"420\":\n send_slack(\":420: :bong: :bud: :bobmarley: :bud: :bong: :420:\", channel)\n return ContinueType.STANDARD\n else:\n return ContinueType.STANDARD", "def received_information(update: Update, context: CallbackContext) -> int:\r\n user_data = context.user_data\r\n text = update.message.text\r\n category = user_data['choice']\r\n user_data[category] = text\r\n del user_data['choice']\r\n\r\n update.message.reply_text(\r\n \"Genial, tu pedido está avanzando de esta manera:\"\r\n f\"{facts_to_str(user_data)}Puedes agregar algún comentario o cambio en tu orden en Comentarios...\",\r\n reply_markup=markup,\r\n )\r\n\r\n return CHOOSING", "def hire(update, context):\n #answer query and load users\n context.bot.answer_callback_query(update.callback_query.id)\n data = update.callback_query.data\n usernames = re.match(r'-3\\S*-(\\S+)-(\\S+)', data)\n username1, username2 = usernames.group(1), usernames.group(2)\n user1, user2 = um.load_user_data([username1, username2])\n #prevent user from executing if status is not 2\n if user1[\"user_status\"] != \"2\":\n return None\n #variable to control actions\n approved_action = []\n #list of possible actions to take depending on button pressed (callback_query.data)\n if \"-3-\" in data and gm.check_gold(context.bot, user1[\"username\"], 5*int(config['soldier']['price'])):\n reply_markup = mc.show_user_options(user1[\"username\"], user2[\"username\"], 5, [\"Soldiers\", \"Warriors\", \"Knights\", \"Flee\", \"Back\"], [\"3.1\", \"3.2\", \"3.3\", \"flee\", \"reshow_main\"])\n context.bot.editMessageText(chat_id=user1[\"userid\"], message_id=update.callback_query.message.message_id, text=\"Which unit do you wish to hire?\", reply_markup=reply_markup)\n elif \"-3.1-\" in data and gm.check_gold(context.bot, user1[\"username\"], 5*int(config['soldier']['price'])):\n reply_markup = 
mc.show_user_options(user1[\"username\"], user2[\"username\"], 5, [\"5\", \"10\", \"15\", \"Flee\", \"Back\"], [\"3.1.1\", \"3.1.2\", \"3.1.3\", \"flee\", \"3\"])\n context.bot.editMessageText(chat_id=user1[\"userid\"], message_id=update.callback_query.message.message_id, text=\"How many Soldiers do you wish to hire? (20 Gold, 5 Attack Damage each)\", reply_markup=reply_markup)\n elif \"-3.2-\" in data and gm.check_gold(context.bot, user1[\"username\"], 5*int(config['warrior']['price'])):\n reply_markup = mc.show_user_options(user1[\"username\"], user2[\"username\"], 5, [\"5\", \"10\", \"15\", \"Flee\", \"Back\"], [\"3.2.1\", \"3.2.2\", \"3.2.3\", \"flee\", \"3\"])\n context.bot.editMessageText(chat_id=user1[\"userid\"], message_id=update.callback_query.message.message_id, text=\"How many Warriors do you wish to hire? (50 Gold, 10 Attack Damage each)\", reply_markup=reply_markup)\n elif \"-3.3-\" in data and gm.check_gold(context.bot, user1[\"username\"], 5*int(config['knight']['price'])):\n reply_markup = mc.show_user_options(user1[\"username\"], user2[\"username\"], 5, [\"5\", \"10\", \"15\", \"Flee\", \"Back\"], [\"3.3.1\", \"3.3.2\", \"3.3.3\", \"flee\", \"3\"])\n context.bot.editMessageText(chat_id=user1[\"userid\"], message_id=update.callback_query.message.message_id, text=\"How many Knights do you wish to hire? (100 Gold, 20 Attack Damage each)\", reply_markup=reply_markup)\n elif \"-3.1.1-\" in data and gm.check_gold(context.bot, user1[\"username\"], 5*int(config['soldier']['price'])):\n approved_action = [\"-3.1.1-\", \"You chose to hire <b>5</b> Soldiers!\"]\n elif \"-3.1.2-\" in data and gm.check_gold(context.bot, user1[\"username\"], 10*int(config['soldier']['price'])):\n approved_action = [\"-3.1.2-\", \"You chose to hire <b>10</b> Soldiers!\"]\n elif \"-3.1.3-\" in data and gm.check_gold(context.bot, user1[\"username\"], 15*int(config['soldier']['price'])):\n approved_action = [\"-3.1.3-\", \"You chose to hire <b>15</b> Soldiers!\"]\n elif \"-3.2.1-\" in data and gm.check_gold(context.bot, user1[\"username\"], 5*int(config['warrior']['price'])):\n approved_action = [\"-3.2.1-\", \"You chose to hire <b>5</b> Warriors!\"]\n elif \"-3.2.2-\" in data and gm.check_gold(context.bot, user1[\"username\"], 10*int(config['warrior']['price'])):\n approved_action = [\"-3.2.2-\", \"You chose to hire <b>10</b> Warriors!\"]\n elif \"-3.2.3-\" in data and gm.check_gold(context.bot, user1[\"username\"], 15*int(config['warrior']['price'])):\n approved_action = [\"-3.2.3-\", \"You chose to hire <b>15</b> Warriors!\"]\n elif \"-3.3.1-\" in data and gm.check_gold(context.bot, user1[\"username\"], 5*int(config['knight']['price'])):\n approved_action = [\"-3.3.1-\", \"You chose to hire <b>5</b> Knights!\"]\n elif \"-3.3.2-\" in data and gm.check_gold(context.bot, user1[\"username\"], 10*int(config['knight']['price'])):\n approved_action = [\"-3.3.2-\", \"You chose to hire <b>10</b> Knights!\"]\n elif \"-3.3.3-\" in data and gm.check_gold(context.bot, user1[\"username\"], 15*int(config['knight']['price'])):\n approved_action = [\"-3.3.3-\", \"You chose to hire <b>15</b> Knights!\"]\n if approved_action != []:\n um.switch_user_status(user1, user2, \"3\", \"2\")\n context.bot.deleteMessage(chat_id=user1[\"userid\"], message_id=update.callback_query.message.message_id)\n context.bot.send_message(chat_id=user1[\"userid\"], text=approved_action[1], parse_mode=ParseMode.HTML)\n gm.hire(context.bot, user1[\"username\"], user2[\"username\"], approved_action[0][1:6])\n elif \"-3-\" in data or \"-3.1-\" in 
data or \"-3.2\" in data or \"-3.3-\" in data:\n pass\n else:\n context.bot.send_message(chat_id=user1[\"userid\"], text=\"You do not have enough gold!\")\n return None", "async def _8ball(self, ctx):\n\n # Define possible responses\n responses = ['It is certain',\n 'It is decidedly so',\n 'Without a doubt',\n 'Yes - definitely',\n 'You may rely on it',\n 'As I see it, yes',\n 'Most likely',\n 'Outlook is good',\n 'Yes',\n 'Signs indicate yes',\n 'Reply hazy, try again',\n 'Ask again later',\n 'Better not tell you now',\n 'I can\\'t give a prediction at this time',\n 'Concentrate and ask again later',\n 'Don\\'t count on it',\n 'No',\n 'My sources say no',\n 'Outlook is not so good',\n 'Very doubtful']\n # Pick random response and send it\n await ctx.send(f'{choice(responses)} {ctx.message.author.mention}')", "def handle_selection_eng(self):\n choice = self.get_input()\n if choice == '1':\n self.login_menu()\n elif choice == '2':\n self.authenticate_qr()\n elif choice == '3':\n self.authenticate_bluetooth()\n elif choice == '4':\n self.is_user = True\n self.display_main()", "async def _99hand(ctx):\n hand = NNB.hand(ctx.message.author.id)\n await ctx.message.author.send(\"Your current hand is {}.\".format(hand))\n print(ctx.message.author.name, \"requested to see their hand.\")", "def setup(bot):\n bot.add_cog(TruthOrDareCmd(bot))", "def choix_action(joueur_actu: object):\n roulette = [\"coup vertical\", \"coup horizontal\", \"rien\", \"rien\"]\n resultat_roulette = \"\"\n if joueur_actu.portefeuille_joueur >= 150:\n choix_roulette = input(\n \"{}, Vous avez actuellement {} euros dans votre portefeuille, voulez-vous faire tourner la roulette pour\"\n \" 150 euros? (o ou n) \\n\\n\".format(joueur_actu.nom_joueur, joueur_actu.portefeuille_joueur))\n print(\"score:\", joueur_actu.score)\n if choix_roulette == \"o\":\n joueur_actu.portefeuille_joueur = joueur_actu.portefeuille_joueur - 150\n joueur_actu - 150\n resultat_roulette = random.choice(roulette)\n if resultat_roulette == \"rien\":\n print(\"Dommage, vous n'avez rien gagné !\")\n else:\n print(\"Félicitations vous avez gagné le sort suivant : {} \".format(resultat_roulette))\n else:\n print(\"Vous n'avez pas assez d'argent pour faire tourner la roulette\\n\\n\")\n return resultat_roulette" ]
[ "0.6226377", "0.5935546", "0.5760656", "0.574801", "0.5716039", "0.56322324", "0.561385", "0.5553106", "0.55161047", "0.5512562", "0.5495656", "0.54472333", "0.5444497", "0.5443797", "0.5440587", "0.53947", "0.5392457", "0.5389444", "0.53876394", "0.5375197", "0.5374281", "0.53549486", "0.5354255", "0.53502434", "0.5348984", "0.5348675", "0.5346401", "0.534521", "0.5322197", "0.5321632", "0.5301211", "0.5291036", "0.52884597", "0.5282188", "0.5282188", "0.5282188", "0.52816343", "0.5278883", "0.5277357", "0.52686703", "0.52686703", "0.5263824", "0.5253885", "0.5208528", "0.52044916", "0.5188964", "0.516468", "0.5159713", "0.51583403", "0.5154651", "0.51411766", "0.51405734", "0.5137654", "0.5127871", "0.5126197", "0.5113542", "0.51097035", "0.5096862", "0.5095994", "0.5093696", "0.50922406", "0.5091207", "0.5084909", "0.50823385", "0.50751114", "0.5073694", "0.5064376", "0.50617194", "0.50540626", "0.50495213", "0.5045067", "0.50414544", "0.50410306", "0.5037803", "0.50310224", "0.5031019", "0.50301653", "0.5020683", "0.5012626", "0.5011947", "0.5009078", "0.50053346", "0.50006723", "0.49970683", "0.49959192", "0.49918258", "0.49896201", "0.49872097", "0.49865717", "0.49813882", "0.49806178", "0.49785882", "0.49731997", "0.49653348", "0.49638698", "0.49605843", "0.49555498", "0.49514684", "0.4949223", "0.49459642" ]
0.51799744
46
Leave L and b unchanged
def _buildMatrix(self, SparseMatrix, Ncells, MaxFaces, coeff): return (0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l_degenerate(self):\n self.tmp = self.right\n self.right = self.left", "def le_inplace(a,b):", "def l_un_degenerate(self):\n self.right = self.tmp", "def lt_inplace(a,b):", "def ge_inplace(a,b):", "def restore(self):\n self.u = self.ub.copy()\n self.w = self.wb.copy()\n self.v = self.vb.copy()\n if self.en_bias: self.b = self.bb.copy()", "def mod_inplace(a, b):", "def __rmul__(self, b):\n raise SystemError(\"pFun_wrapper can't act on the right (by *)\")", "def r_un_degenerate(self):\n self.left = self.tmp", "def revert(self, a):\n raise NotImplementedError", "def alphaBetaSwap(self, l1, l2):\n if not (0 <= l1 < self.numLabels) or not (0 <= l2 < self.numLabels):\n raise IndexOutOfBoundError()\n _cgco.gcoAlphaBetaSwap(self.handle, np.intc(l1), np.intc(l2))", "def sub_inplace(a, b):", "def r_degenerate(self):\n self.tmp = self.left\n self.left = self.right", "def fL():\n for n in b.allNodes():\n n.autoplace()", "def fixup(self, l):\n\n\n fudges = [ ('A', 'B'),\n ('E', 'F') ]\n\n for x,y in fudges:\n if x in l and y not in l:\n l += y\n if y in l and x not in l:\n l += x\n\n return l", "def _set_lb(o, d):\n o.setlb(d)", "def increase_left_boundary(self):\n self.L = self.L - 1.0\n self.Ne = self.Ne + 1", "def swap(self):\n return _coconut_tail_call(Eq, self.b, self.a)", "def turn_biases_off(self, reverse = False):\n if not reverse:\n self.b_copy = self.b.get_value()\n self.b.set_value(np.float32(0. * self.b_copy))\n else:\n self.b.set_value(self.b_copy)", "def lform(self):\n a, c, d, b = self.to_ccw()\n if b < c:\n a += b\n b -= b\n c -= b\n d += b\n else:\n a += c\n b -= c\n c -= c\n d += c\n return self.__class__.from_ccw(a, c, d, b)", "def and__inplace(a,b):", "def invert(self, a, b):\n raise NotImplementedError", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def or__inplace(a,b):", "def flip_two(lnk):\n if lnk is not Link.empty and lnk.rest is not Link.empty:\n lnk.first, lnk.rest.first = lnk.rest.first, lnk.first\n flip_two(lnk.rest.rest)", "def shift_aligners(self):\n for i in range(self.height-1, 1, -1):\n self.align.list[i] = self.align.list[i-1]\n self.align.list[1] = copy.deepcopy(self.align.list[0])", "def _adjustBlock(self, b):\n raise NotImplementedError", "def set_coord(self, l, sign, b):\n if l is not None:\n self.l = float(l)\n if b is not None:\n self.b = float(b)\n if sign == '-':\n self.b *= -1.", "def update_M_B(Bt, M, B):\n n,_ = Bt.shape\n for i in range(n):\n g = np.where(Bt[i,:]==1)[0][0]\n # print(\"g=\", g)\n M.remove(g)\n B[i].add(g)\n return M, B", "def back_substitution(L, x, y):\n n = len(L)\n for i in range(n - 1, -1, -1):\n prev_sum = 0\n for j in range(i + 1, n):\n prev_sum += L[j][i] * x[j][0]\n x[i][0] = (y[i][0] - prev_sum) / L[i][i]", "def update_coords(self, l, b):\n self.l = l\n self.b = b\n self.ra, self.dec = astLib.astCoords.convertCoords(\n \"GALACTIC\", \"J2000\", self.l, self.b, epoch=2000.)", "def flip_two_ptr(lnk):\n def helper(lnk, prev):\n if lnk is not Link.empty and lnk.rest is not Link.empty:\n if prev:\n prev.rest = lnk.rest\n temp = lnk.rest\n lnk.rest, temp.rest = lnk.rest.rest, lnk\n helper(lnk.rest, lnk)\n helper(lnk, None)", "def _poputil_block_recompute_backward(op, grads):\n return grads", "def __zero_forward_open(x, y, c, l):\n if not c: x, y = l - y, l - x\n return x, y", "def manualSwapRow(a, b, r1, r2):\n if r2 < len(a) and r1 < len(a):\n temp = a[r1]\n a[r1] = a[r2]\n a[r2] = temp\n if b is not None: # if the result vector is not none swap him too\n temp = b[r1]\n b[r1] = b[r2]\n b[r2] = temp\n return a, b", 
"def __lshift__(self, other) -> 'MultiVector':\n return self.lc(other)", "def L_model_backward(AL, Y, caches):\n pass", "def revise():", "def erase(self, A):\n\n self.state = [z - a for z, a in zip(self.state, A)]\n self.state = np.array([0] + self.state[:-1])", "def set_blists(self, blists):\n self.blists = blists[:]", "def swap_main(self):\n if self.align == self._left:\n self.swap_left()\n elif self.align == self._right:\n self.swap_right()", "def Lopen(cls, a, b):\n return cls(a, b, True, False)", "def invert(self):\n self._c = ~self._c", "def gt_inplace(a,b):", "def backward(self, aw, lw):\n # This can be accelerated by storing beta for nodes without recomputing\n for vx in self.traverse_arcs_topo(reverse=True):\n # Beta for arcs into </s> = 1.0\n if len(vx.dest.exits) == 0:\n beta = 0\n else:\n beta = LOGZERO\n # For each outgoing arc from vx.dest\n for wx in vx.dest.exits:\n # Accumulate beta for this arc\n beta = np.logaddexp(beta, wx.beta)\n # Update beta for this arc\n vx.beta = beta + vx.ascr * aw + vx.lscr * lw", "def add_inplace(a, b):", "def edbl():\n bpy.ops.transform.edge_slide(value=self.btr, mirror=False, correct_uv=False)\n bpy.ops.mesh.bevel(offset=self.bofs/2 , segments=self.bss+1 , vertex_only=False)\n bpy.ops.mesh.select_less()\n bpy.ops.transform.shrink_fatten(value=(self.bts * -1) if self.dms == 1 else self.bts, use_even_offset=self.bev)\n bpy.ops.mesh.remove_doubles(threshold=self.brd)\n if self.brx == True:\n try:\n bpy.ops.mesh.looptools_relax(input='selected', interpolation='linear', iterations='3', regular=False)\n except AttributeError:\n self.report({'ERROR'},\"I'm sorry the addon 'Looptools' is not active or not installed.\")\n if self.dsp == 1:\n bpy.ops.mesh.bevel(offset=0.1, segments=2, vertex_only=False)\n bpy.ops.mesh.select_less()\n bpy.ops.transform.shrink_fatten(value=0.2, use_even_offset=False, mirror=False, proportional='CONNECTED',\n proportional_edit_falloff='SMOOTH', proportional_size=0.0839017)", "def _bd_updateB(A,W):\r\n\r\n \r\n Y = _Y(A, W)\r\n B_new = np.greater_equal(Y, 0.5).T # Update B matrix. 
\r\n \r\n #### setting all True rows to False ####\r\n # if feature has similar associate to all clusters, is an outlier (see Li and Zhu)\r\n # will have a row of all True by the np.greater_equal() function, reverse to make row of False\r\n \r\n # # TODO: use single outlier function and create a shared utils.py \r\n # def is_outlier(d):\r\n \r\n # if np.array_equal(d, np.array([True]*len(d))):\r\n # return np.array([False]*len(d))\r\n # else:\r\n # return d\r\n \r\n # B_new = np.apply_along_axis(is_outlier, axis = 1, arr = B_new)\r\n\r\n B_new = _is_bd_outlier(B_new)\r\n \r\n return B_new", "def invert_inplace(a):", "def erase(self, A):\n\n self.game_state = [z - a for z, a in zip(self.game_state, A)]\n self.game_state = [0] + self.game_state[:-1]", "def swap(a,b):\n temp = a\n a = b\n b = temp\n return(a,b)", "def forward_substitution(l, b):\n y = np.zeros(b.shape[0])\n y[0] = b[0] / l[0, 0]\n for i in range(1, b.shape[0]):\n _sum = np.sum(l[i, :i] * y[:i])\n y[i] = (b[i] - _sum) / l[i, i]\n return y", "def __rmul__(self, A):\n pass", "def setByLable(self, a, b, value):\n\t\tself.matrix[self.access[a]][self.access[b]] = value", "def subtract(self,l):\r\n\t\t\r\n\t\t# convert to line\r\n\t\tl = Li(l)\r\n\t\t\r\n\t\t# scale by -1 and add\r\n\t\tl = l.scale(-1)\r\n\t\ts = self.add(l)\r\n\t\t\r\n\t\treturn s", "def invert(self):\n if self.type == 'binary':\n self.leftentityid, self.rightentityid = self.rightentityid, self.leftentityid", "def LRC(self):\n pseudo_inverse = np.linalg.pinv(self.phi)\n self.g = np.dot(pseudo_inverse, self.Y)\n self.b = self.g[-1]\n self.g = self.g[:-1]", "def backward(self,x, y):\n # TODO\n self.delta[self.L-1]=self.a[self.L-1]-y\n le=len(self.delta)\n for i in range(le-2,-1,-1):\n cx= self.w[i+1][email protected][i+1]\n self.delta[i]=self.phi_d(self.z[i])*cx\n for i in range(1,self.L):\n self.dw[i]=np.asmatrix(self.delta[i])[email protected](self.a[i-1])\n self.db[i]=self.delta[i]", "def reset_boundaries(self):\n self.L = - np.random.uniform(0.0,1.0)\n self.R = self.L + 1.0\n self.Ne = 0.0\n self.Nc = 0.0", "def flip(self):\n self._start, self._end = self._end, self._start", "def merge_overwrap(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n for j in range(Ly):\n cff = z_u_w[j,N] - z_u_w[j,0]\n if self.hbls[j] + self.hbbl[j] > cff:\n self.hbls[j] = cff\n self.hbbl[j] = cff", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child", "def switch(self, a, b):\n\n self.heap[a], self.heap[b] = self.heap[b], self.heap[a]", "def __one_forward_closed(x, y, c, l):\n x -= 1\n if not c: x, y = l - y, l - x\n return x, y", "def forward_substitution(L, b):\n n = len(L[0])\n z = [0] * n\n for i in range(0, n):\n if L[i][i] != 0:\n accum = 0\n for j in range(0, i):\n accum += L[i][j] * z[j]\n z[i] = (b[i] - accum) / L[i][i]\n return z", "def __swap(self, index_1, index_2):\n temp = self._lits[index_1]\n self._lits[index_1] = self._lits[index_2]\n self._lits[index_2] = temp", "def neq_inplace(a,b):", "def replaced(L, old, new):\n return [x if x != old else new for x in L]", "def to_tlbr(self):\n ret = self.tlwh.copy()\n ret[2:] += ret[:2]\n return ret", "def to_tlbr(self):\n ret = self.tlwh.copy()\n ret[2:] += ret[:2]\n return ret", "def to_tlbr(self):\n ret = self.tlwh.copy()\n ret[2:] += ret[:2]\n return ret", "def extend(self, L):\n self[len(self):] = L", "def revert(self, a):\n if self.is_one(a):\n 
return a\n else:\n raise NotReversible('only unity is reversible in a ring')", "def revert(self, *args, **kwargs):", "def unify_walk(a, b, U):\r\n opt = union(a.not_options, b.not_options)\r\n v = NotVariable(\"?\", opt)\r\n return U.merge(v, a, b)", "def backward(self, b):\n\n self.b = [b]\n\n # calculate the estimated errors on each layer ($\\delta$)\n for k,w in reversed(list(enumerate(self.weights[1:]))):\n if self.has_bias:\n delta = numpy.dot(self.b[0], w[1:].T)\n act = self.a[k+1][:,1:]\n else:\n delta = numpy.dot(self.b[0], w.T)\n act = self.a[k+1]\n self.b.insert(0, delta*self.hidden_activation.f_prime_from_f(act))\n\n self.d = []\n for a,b in zip(self.a[:-1], self.b):\n self.d.append(numpy.dot(a.T, b) / len(b))\n\n return self.d", "def __invert(self, args):", "def w_lin_update(u, Lin_lhs, Lin_rhs):\n w_lin_next = Lin_lhs.dot(u) \n violation_indices = w_lin_next - Lin_rhs > 0\n w_lin_next[violation_indices] = Lin_rhs[violation_indices]\n return w_lin_next", "def second_inplace(a):", "def reverse_difference():", "def __setstate__(self, state):\n l, bl = state\n self.layers = l\n self.best_loss = bl", "def __lshift__(self, other):\n self.disconnect(other)", "def eq_inplace(a,b):", "def cell_to_blockB(self, cell):\r\n self.blockB += 1\r\n self.blockA -= 1\r\n if cell.locked is True:\r\n self.blockB_locked += 1\r\n self.blockA_locked -= 1\r\n else:\r\n self.blockB_free += 1\r\n self.blockA_free -= 1\r\n self.blockA_cells.remove(cell)\r\n self.blockB_cells.append(cell)\r\n self.__update_cut_state()\r\n assert self.blockA >= 0\r\n assert self.blockA_free >= 0\r\n assert self.blockB >= 0\r\n assert self.blockB_free >= 0\r\n assert self.blockA_free + self.blockA_locked == self.blockA\r\n assert self.blockB_free + self.blockB_locked == self.blockB", "def merge(link_state_tuples, orig_must_be_materialized=False):\n new_state = {}\n\n link_state_tuples = link_state_tuples[:] # copy so we can mutate it\n\n # We'll have one of the links in a proper variable to \"guide\" the algorithm\n # around it. We'll iterate thru the array to mirror the changes to the rest\n sample_link, sample_state = link_state_tuples.pop()\n inputargs = sample_link.target.inputargs\n\n # the big problem here is aliasing. there can be several variables that\n # store the same vobj. this mapping must be the same across all links\n # to keep track, use the following dict, mapping\n # vobj -> (vobj1, ..., vobjn)\n # for each vobj from all the incoming links\n passed_vobjs = {}\n\n # map vobj in sample_state -> vobj in new_state\n merged_vobjs = {}\n\n inputargsindex = 0\n while inputargsindex < len(sample_link.args):\n sample_obj = sample_link.args[inputargsindex]\n sample_vobj = sample_state.get(sample_obj, None)\n targ = inputargs[inputargsindex]\n must_be_materialized = orig_must_be_materialized\n if sample_vobj is None:\n must_be_materialized = True\n\n # set the flag accordingly instead of a huge if clause\n if not must_be_materialized:\n vobj_list = [sample_vobj]\n for lnk, state in link_state_tuples:\n obj = lnk.args[inputargsindex]\n vobj = state.get(obj)\n if vobj is None:\n must_be_materialized = True\n break\n if not sample_vobj.identical_malloc_args(vobj):\n must_be_materialized = True\n vobj_list.append(vobj)\n else:\n # all the same! check aliasing\n for vobj in vobj_list:\n if vobj in passed_vobjs:\n if passed_vobjs[vobj] != vobj_list:\n # different aliasing! too bad\n must_be_materialized = True\n else:\n passed_vobjs[vobj] = vobj_list\n\n if must_be_materialized:\n # We can't merge! 
materialize all objects!\n changed = materialize_object(sample_obj,\n sample_state,\n sample_link.prevblock.operations)\n for lnk, state in link_state_tuples:\n changed = materialize_object(\n lnk.args[inputargsindex], state,\n lnk.prevblock.operations) or changed\n if changed:\n # we forced something! that can have all kinds of weird effects\n # if the virtual has already been passed to the target block\n # earlier. therefore, we restart.\n inputargsindex = 0\n new_state.clear()\n passed_vobjs.clear()\n merged_vobjs.clear()\n continue\n else:\n # We can merge: objects are virtual and classes are the same\n new_vobj = merged_vobjs.get(sample_vobj)\n if new_vobj is None:\n new_vobj = VirtualObject(sample_vobj.concretetype,\n sample_vobj.malloc_args)\n merged_vobjs[sample_vobj] = new_vobj\n for key, v in sample_vobj.vars.iteritems():\n m = Variable()\n m.concretetype = v.concretetype\n inputargs.insert((inputargsindex + 1), m)\n sample_link.args.insert((inputargsindex + 1), v)\n for lnk, state in link_state_tuples:\n vo = state[lnk.args[inputargsindex]]\n try:\n newarg = vo.vars[key]\n except KeyError:\n # uninitialized field!\n newarg = Constant(\n lltype.nullptr(v.concretetype.TO),\n v.concretetype)\n lnk.args.insert((inputargsindex + 1), newarg)\n new_vobj.vars[key] = m\n new_state[targ] = new_vobj\n new_vobj.aliases.add(targ)\n inputargsindex += 1\n # safety check: size of state can only shrink\n vobjset = set(new_state.values())\n for _, state in link_state_tuples:\n assert len(vobjset) <= len(set(state.values()))\n return new_state", "def retract(self, la):\n if la == []:\n return self([])\n if la[0] <= self.k:\n return self(la)\n if self.t == 1:\n return self.zero()\n else:\n kHLP = self._kBoundedRing.kHallLittlewoodP()\n return self(kHLP._m_to_kHLP_on_basis(la))", "def reset_b(self, sess):\r\n sess.run(self._init_ops)", "def g_lb(self):\n pass", "def backward(self, top, propagate_down, bottom):\r\n pass", "def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())", "def update_b(color, new_b):\n\n color.update_b(new_b)", "def sub(self):\n a = self.pop()\n b = self.pop()\n c= b-a\n self.push(c)", "def broaden(mask):\n if len(mask) < 2:\n return mask\n # Note: the order in which these operations are performed is important.\n # Modifying newmask in-place with the |= operator only works for if\n # newmask[:-1] is the L-value.\n newmask = concatenate(([False], mask[1:] | mask[:-1]))\n newmask[:-1] |= mask[1:]\n return newmask", "def buble_sort(l):\r\n for i in range(len(l)):\r\n for j in range(i+1, len(l)):\r\n if (l[j-1]>l[j]):\r\n l[j-1], l[j] = l[j], l[j-1]", "def shrink(self, B, **kwargs):\n\n b = self.b.copy()\n continuous_map(b)\n bstate = self.copy(b=b)\n\n assert self.get_nonempty_B() == bstate.get_nonempty_B(), \\\n \"Error: inconsistent number of groups after copying (%d, %d)\" % \\\n (self.get_nonempty_B(), bstate.get_nonempty_B())\n\n if bstate.get_nonempty_B() < B:\n raise ValueError(\"cannot shrink state to a larger number\" +\n \" of groups: %d -> %d (total: %d)\" %\n (bstate.get_nonempty_B(), B, self.B))\n\n while bstate.get_nonempty_B() > B:\n bstate.merge_sweep(bstate.get_nonempty_B() - B, **kwargs)\n\n continuous_map(bstate.b)\n bstate = self.copy(b=bstate.b.a, Lrecdx=bstate.Lrecdx)\n\n if _bm_test():\n assert bstate.get_nonempty_B() == B, \\\n \"wrong number of groups after shrink: %d, %d\" % \\\n (bstate.get_nonempty_B(), B)\n assert bstate.wr.a.min() > 0, \"empty group after shrink!\"\n\n return 
bstate", "def mul_inplace(a, b):", "def __one_forward_open(x, y, c, l):\n x -= 1\n y -= 1\n if not c: x, y = l - y, l - x\n return x, y", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass" ]
[ "0.6592938", "0.6592725", "0.6477138", "0.6335981", "0.5866531", "0.58485544", "0.58470535", "0.58193916", "0.5793898", "0.5778269", "0.57440275", "0.5707928", "0.56628865", "0.56374824", "0.5598262", "0.55937904", "0.5581618", "0.5562398", "0.553753", "0.5497139", "0.54934764", "0.5459466", "0.54481363", "0.54285824", "0.5413942", "0.5393671", "0.53890985", "0.53693867", "0.5362571", "0.53476024", "0.53410614", "0.532848", "0.5317169", "0.5316005", "0.531293", "0.5305371", "0.52936524", "0.5281331", "0.5275657", "0.5247717", "0.52411383", "0.5231096", "0.52290034", "0.5228867", "0.5217835", "0.5215455", "0.52147025", "0.52132016", "0.52033645", "0.5198577", "0.5181785", "0.51657826", "0.51571506", "0.515388", "0.514853", "0.51485056", "0.5140519", "0.51374865", "0.5122481", "0.51147693", "0.5113748", "0.5108058", "0.51038307", "0.51032925", "0.50978035", "0.5096727", "0.5096645", "0.50921476", "0.50912297", "0.5079219", "0.5079219", "0.5079219", "0.50739264", "0.50679827", "0.5064369", "0.5053495", "0.5048463", "0.50482935", "0.5040802", "0.5034846", "0.5034291", "0.50328463", "0.5032748", "0.5030231", "0.50260043", "0.5021338", "0.5017265", "0.5016853", "0.50116235", "0.5010071", "0.5006459", "0.5004966", "0.50038886", "0.49956584", "0.49900445", "0.49818647", "0.4980408", "0.4972856", "0.49715224", "0.49715224", "0.49715224" ]
0.0
-1
Returns OAuth2 credentials if we have valid credentials in the session. This is a 'truthy' value. Return None if we don't have credentials, or if they have expired or are otherwise invalid. This is a 'falsy' value.
def valid_credentials(): if 'credentials' not in flask.session: return None credentials = client.OAuth2Credentials.from_json( flask.session['credentials']) if (credentials.invalid or credentials.access_token_expired): return None return credentials
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or credentials.access_token_expired):\n return None\n return credentials", "def get_credentials():\n credentials = tools.get_credentials_file()\n session_credentials = session.get_session_credentials()\n for credentials_key in credentials:\n\n # checking for not false, but truthy value here is the desired behavior\n session_value = session_credentials.get(credentials_key)\n if session_value is False or session_value:\n credentials[credentials_key] = session_value\n return credentials", "def get_creds():\n\tcredentials = None\n\tif os.path.exists('token.pickle'):\n\t\twith open('token.pickle', 'rb') as token:\n\t\t\tcredentials = pickle.load(token)\n\t# If there are no (valid) credentials available, let the user log in.\n\tif not credentials or not credentials.valid:\n\t\tif credentials and credentials.expired and credentials.refresh_token:\n\t\t\tcredentials.refresh(Request())\n\t\telse:\n\t\t\tflow = InstalledAppFlow.from_client_secrets_file('config/sa.json', SCOPES)\n\t\t\tcredentials = flow.run_local_server(port=0)\n\t\t# Save the credentials for the next run\n\t\twith open('token.pickle', 'wb') as token:\n\t\t\tpickle.dump(credentials, token)\n\treturn credentials", "def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials", "def get_creds():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('inputs/token.pickle'):\n with open('inputs/token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'inputs/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('inputs/token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return creds", "def auth_credentials(self) -> Optional[Sequence['outputs.AuthCredentialResponse']]:\n return pulumi.get(self, \"auth_credentials\")", "def GetCredentials(self):\n return self._session.get(_CREDENTIAL_KEY, credentials.MapdCredentials())", "def get_credentials():\n credentials_path = os.path.join(CREDENTIALS_DIR, CREDENTIALS_FILE)\n store = oauth2client.file.Storage(credentials_path)\n credentials = store.locked_get()\n\n if not credentials or credentials.invalid:\n client_secret_path = os.path.join(CREDENTIAL_DIR, CLIENT_SECRET_FILE)\n flow = client.flow_from_clientsecrets(client_secret_path, \n scope='https://www.googleapis.com/auth/admin.directory.resource.calendar',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n\n print(\"Storing credentials to: \" + credentials_path)\n\n\n return credentials", "def get_credentials():\n store = Storage(CLIENT_CREDENTIALS_FILE)\n credentials = store.get()\n if not 
credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)\n return credentials", "def load_session_credentials(request_handler):\n session = sessions.LilCookies(request_handler, SESSION_SECRET)\n userid = session.get_secure_cookie(name='userid')\n if userid:\n return userid, StorageByKeyName(Credentials, userid, 'credentials').get()\n else:\n return None, None", "def credentials(self):\n if self.user and self.is_authenticated():\n return AuthCredentials(['authenticated'] + self.user.permissions)\n else:\n return AuthCredentials()", "def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials", "def get_auth_token(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens,\n # and is created automatically when the authorization flow completes\n # for the first time.\n if os.path.exists(self.token_path):\n with open(self.token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.credentials_path, self.scopes)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(self.token_path, 'wb') as token:\n pickle.dump(creds, token)\n return creds", "def get_credentials(self, **kwargs):\n creds_file = os.path.join(kwargs['user_dir'], 'credentials.json')\n\n # Getting credentials from Storage\n store = file.Storage(creds_file)\n creds = store.get()\n\n # Validating or refreshing credentials, if necessary\n if creds is None or creds.invalid:\n flow = client.flow_from_clientsecrets(self.client_secret_file,\n self.scopes)\n creds = tools.run_flow(flow, store)\n elif creds.access_token_expired:\n creds.refresh(httplib2.Http())\n else:\n pass\n\n return creds", "def credentials_given(self):\n return self.key and self.secret", "def getsessionpasswd(cls, session):\n sessionkey = cls.sessionkey(session)\n if sessionkey in sessionmgr.keys():\n return True, sessionmgr[sessionkey]['password']\n return False, None", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n 
credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility 
with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def GetCredentials(self, credentials_path: str,\n client_secrets_path: str) -> Optional[Any]:\n scopes = ['openid', 'https://www.googleapis.com/auth/userinfo.email']\n credentials = None\n\n # Load credentials file if it exists\n if os.path.exists(credentials_path):\n try:\n credentials = Credentials.from_authorized_user_file(\n credentials_path, scopes)\n except ValueError as exception:\n msg = f'Error loading credentials: {exception!s}'\n self.ModuleError(msg, critical=True)\n # Refresh credentials using existing refresh_token\n if credentials and credentials.refresh_token:\n self.logger.debug('Found a refresh token. Requesting new id_token...')\n try:\n credentials.refresh(Request())\n except google_exceptions.RefreshError as exception:\n self.logger.debug(f'Error refreshing credentials: {exception!s}')\n else:\n # No credentials file, acquire new credentials from secrets file.\n self.logger.debug(\n 'Could not find existing credentials. Requesting new tokens.')\n try:\n appflow = flow.InstalledAppFlow.from_client_secrets_file(\n client_secrets_path, scopes)\n except FileNotFoundError as exception:\n msg = f'Client secrets file not found: {exception!s}'\n self.ModuleError(msg, critical=True)\n\n self.logger.info(\n 'Starting local HTTP server on localhost:8888 for OAUTH flow. '\n 'If running dftimewolf remotely over SSH you will need to tunnel '\n 'port 8888.')\n appflow.run_local_server(host='localhost', port=8888, open_browser=False)\n credentials = appflow.credentials\n\n # Save credentials\n if credentials:\n with open(credentials_path, 'w', encoding='utf-8') as token:\n token.write(credentials.to_json())\n\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def validate_auth():\n try:\n token = oidc.get_access_token()\n except TypeError:\n # raised when the token isn't accessible to the oidc lib\n raise Unauthorized(\"missing auth token\")\n\n if not oidc.validate_token(token):\n terminate_session()\n raise Unauthorized(\"invalid auth token\")\n return token", "def valid_credentials(self):\n path = '/api/session-user'\n url = '{}{}'.format(self._url_base, path)\n response, content = super(DSBaseService, self)._request(url,\n headers=self._headers(with_content_type=False))\n return int(response['status']) == 200", "def get_credentials():\n credential_dir = os.path.realpath('.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path) # stores the users credentials --> TODO: put in database\n credentials = store.get()\n if not credentials or 
credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n\n credentials = tools.run_flow(flow, store, flags)\n\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reseller-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. 
canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_stored_credentials(user_id):\n #\n # To instantiate an OAuth2Credentials instance from a Json\n # representation, use the oauth2client.client.Credentials.new_from_json\n # class method.\n user = engine.query(User).filter(userId=user_id).first()\n if user:\n user_dict = user.__dict__\n if user_dict['credentials']:\n # credentials = Credentials.new_from_json(user['credentials'])\n credentials = json.loads(user_dict['credentials'])\n token_expiry = credentials['token_expiry']\n dexp = parser.parse(str(token_expiry))\n dexp = dexp.replace(tzinfo=None)\n dnow = datetime.now()\n\n if dexp > dnow:\n return Credentials.new_from_json(user_dict['credentials'])\n else:\n status_code, data = renew_access_token(client_id=credentials['client_id'],\n client_secret=credentials['client_secret'],\n refresh_token=credentials['refresh_token'],\n )\n if status_code == INT_OK:\n credentials['access_token'] = data['access_token']\n credentials['token_expiry'] = datetime_util(datetime.now() + timedelta(seconds=float(str(data['expires_in']))))\n credentials = Credentials.new_from_json(json_encode(credentials))\n user.update_credentials(credentials.to_json())\n user.sync()\n return credentials\n else:\n return None\n else:\n return None\n return None", "def get_credentials():\n credential_dir = os.getcwd()\n credential_path = os.path.join(credential_dir,\n 'smarking_error_check.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def oauth_credentials(self) -> Optional[pulumi.Input['ConnectorProfileConnectorProfileConfigConnectorProfileCredentialsSapoDataOauthCredentialsArgs']]:\n return pulumi.get(self, \"oauth_credentials\")", "def credentials(self) -> pulumi.Output[Optional['outputs.CredentialsResponse']]:\n return pulumi.get(self, \"credentials\")", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = 
os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def create_credentials():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'client_secret.json', SCOPES)\r\n creds = flow.run_local_server()\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n return creds", "def get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with 
Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _get_access_token(self):\n if self._service_token:\n logger.info('Use service token: %s',\n 5 * '*' + self._service_token[50:])\n return self._service_token\n\n if not all([self.app_id, self._login, self._password]):\n raise ValueError(\n 'app_id=%s, login=%s password=%s (masked) must be given'\n % (self.app_id, self._login,\n '*' * len(self._password) if self._password else 'None'))\n\n logger.info(\"Getting access token for user '%s'\" % self._login)\n with self.http_session as s:\n if self._client_secret:\n url_query_params = self.do_direct_authorization(session=s)\n else:\n self.do_login(http_session=s)\n url_query_params = self.do_implicit_flow_authorization(session=s)\n logger.debug('url_query_params: %s', url_query_params)\n\n if 'access_token' in url_query_params:\n logger.info('Access token has been gotten')\n return url_query_params['access_token']\n else:\n raise VkAuthError('OAuth2 authorization error. 
Url params: %s'\n % url_query_params)", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'thejam_calendar.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def authenticate():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'google-drive-credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n return creds", "def get_credentials(self, oid=None):\n path = '/credentials'\n key = 'credentials'\n if oid is not None:\n path = '%s/%s' % (path, oid)\n key = 'credential'\n res = self.client.call(path, 'GET', data='', token=self.token)\n self.logger.debug('Get openstack credentials: %s' % truncate(res))\n try:\n return res[0][key]\n except:\n raise OpenstackError('No credentials found')", "def get_credentials(self):\n home_dir = os.path.expanduser(\"~\")\n credential_dir = os.path.join(home_dir, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, \"autoto.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, self.auth_flags)\n print(\"Storing credentials to \" + credential_path)\n return credentials", "def credentials(self) -> Optional[pulumi.Input['CredentialsArgs']]:\n return pulumi.get(self, \"credentials\")", "def extractCredentials( self, request ):\n #log( 'extractCredentials')\n\n creds = {}\n session = request.SESSION\n username = None\n\n tokenTool = getToolByName(self, 'onetimetoken_storage')\n\n ob = session.get(self.session_var)\n if ob is not None and isinstance(ob, UsernameStorage):\n username = ob._getUsername()\n #log( \"session username: %s\" % username )\n \n if username is None: \n loginCode = request.get('logincode')\n\n if not loginCode:\n return None # not authenticated\n\n try:\n username = tokenTool.verifyToken(loginCode)\n except:\n log( \"Error, token tool refused token: %s\" % sys.exc_info()[0] )\n\n if not username:\n return None # not authenticated\n\n #log( \"token username: %s\" % username )\n\n userstorage = UsernameStorage()\n userstorage._setUsername(username)\n session[self.session_var] = userstorage\n\n creds['remote_host'] = request.get('REMOTE_HOST', '')\n try:\n 
creds['remote_address'] = request.getClientAddr()\n except AttributeError:\n creds['remote_address'] = request.get('REMOTE_ADDR', '')\n\n\n creds['login'] = username\n\n # log( \"returning username: %s\" % username )\n\n return creds", "def get_credentials():\n\thome_dir = os.path.expanduser('~')\n\tcredential_dir = os.path.join(home_dir, '.credentials')\n\tif not os.path.exists(credential_dir):\n\t\tos.makedirs(credential_dir)\n\tcredential_path = os.path.join(credential_dir, \n\t\t\t\t\t\t\t\t\t'facebook_updater.json')\n\t\t\t\t\t\t\t\t\t\n\tstore = oauth2client.file.Storage(credential_path)\n\tcredentials = store.get()\n\tif not credentials or credentials.invalid:\n\t\tflow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n\t\tflow.user_agent = APPLICATION_NAME\n\t\tif flags:\n\t\t\tcredentials = tools.run_flow(flow, store, flags)\n\t\tprint ('Storing credentials to ' + credential_path)\n\treturn credentials", "def get_credentials(client_secrets='client_secrets.json',\n scope_='https://www.googleapis.com/auth/drive',\n redirect_uri_='http://localhost:8080'):\n flow = client.flow_from_clientsecrets(client_secrets,\n scope=scope_,\n redirect_uri=redirect_uri_)\n credentials = tools.run_flow(flow, Store(), None)\n return credentials", "def get_creds():\n # If modifying these scopes, delete the file token.pickle.\n SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n creds = None\n dir_pre = '../secrets'\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n logging.debug(f\"searching for creds in path: {os.getcwd()}\")\n if os.path.exists(os.path.join(dir_pre, 'token.pickle')):\n with open(os.path.join(dir_pre, 'token.pickle'), 'rb') as token:\n creds = pickle.load(token)\n logging.info('opening token.pickle')\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n logging.info('token expired, refreshing token')\n creds.refresh(Request())\n else:\n logging.info('token not found, re authenticating ')\n flow = InstalledAppFlow.from_client_secrets_file(os.path.join(dir_pre, 'credentials.json'), SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(os.path.join(dir_pre, 'token.pickle'), 'wb') as token:\n pickle.dump(creds, token)\n\n return creds", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'client_secret_OCR.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n print(\"Current folder: \" + os.getcwd())\n flow = client.flow_from_clientsecrets(\n \"../../\" + CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def getsessionuser(cls, session):\n sessionkey = session[\"credential\"][\"Authorization\"]\n if sessionkey in sessionmgr.keys():\n return True, sessionmgr[sessionkey]['username']\n return False, None", "def get_credentials(self):\n try:\n with open(self.credentials_file, 'r') as 
fh_credentials:\n credentials_dict = json.loads(fh_credentials.read())\n return credentials_dict\n except IOError:\n self.reset_credentials()\n with open(self.credentials_file, 'r') as fh_credentials:\n return json.loads(fh_credentials.read())", "def credentials(self) -> HTTPBasicAuth:\n if self.user is None or self.password is None:\n return None\n else:\n return HTTPBasicAuth(self.user, self.password)", "def datasource_auth_credentials(self) -> Optional['outputs.SecretStoreBasedAuthCredentialsResponse']:\n return pulumi.get(self, \"datasource_auth_credentials\")", "def oauth2_check_session(self, request):\n valid_token = False\n\n # See if they're in the request\n if 'session' in request.POST:\n print 'session from POST'\n values = self.validate_oauth_session(request.POST['session'])\n\n # Might be in the query string (e.g. from iframe)\n elif 'session' in request.GET:\n print 'session from GET'\n values = self.validate_oauth_session(request.GET['session'])\n\n # Look out for an access_token in our cookies from the JS SDK FB.init\n elif request.COOKIES:\n values = self.validate_oauth_cookie_signature(request.COOKIES)\n print 'session from COOKIE %s' % values\n\n if values and 'access_token' in values:\n request.session['oauth2_token'] = values['access_token']\n request.session['oauth2_token_expires'] = values['expires']\n self.session_key = values['session_key']\n self.uid = values['uid']\n self.added = True\n \n # If we've been accepted by the user\n if self.added:\n \n # See if we've got this user's access_token in our session\n if 'oauth2_token' in request.session:\n self.oauth2_token = request.session['oauth2_token']\n self.oauth2_token_expires = request.session['oauth2_token_expires']\n\n if self.oauth2_token_expires:\n if self.oauth2_token_expires > time.time():\n # Got a token, and it's valid\n valid_token = True\n else:\n del request.session['oauth2_token']\n del request.session['oauth2_token_expires']\n \n return valid_token", "def get_credentials(self):\n return self.credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\n return PlainCredentials(self.user_name, self.password)", "def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n 
return credentials", "def _get_credentials(self, client_secret_file, scopes):\n\n # Check cred file exists.\n if not os.path.exists(client_secret_file):\n Console.error(\n f\"Credential file {client_secret_file} does not exists. Check the path and try again.\")\n return None\n\n # Authenticate using service account.\n _credentials = service_account.Credentials.from_service_account_file(\n filename=client_secret_file,\n scopes=scopes)\n return _credentials", "def get_oauth_session(self):\n return self.oauth_session", "def get_reddit_oath_credentials(site_name, _current_parser=None):\n return get_multi_values(site_name, OAUTH_CRED_KEYS, _current_parser)[site_name]", "def GetUserCredentials():\n email = options.email\n if email is None:\n email = GetEmail(\"Email (login for uploading to %s)\" % options.server)\n password = getpass.getpass(\"Password for %s: \" % email)\n return (email, password)", "def validate_credentials(self, data):\n try:\n boolean_param_list = []\n get_service_data = app.config.get('JWT_CONFIG').get('CREDENTIAL')\n token_identity_param = app.config.get('JWT_CONFIG').get('TOKEN_IDENTITY_PARAM')\n expires_delta = app.config.get('JWT_CONFIG').get('TOKEN_EXPIRY')\n expires_delta = eval(expires_delta) if isinstance(expires_delta, str) else expires_delta\n credentials = data.get('credentials')\n identity_credentials_keys = list(get_service_data.keys())\n for key in identity_credentials_keys:\n if get_service_data[key] != credentials[key]:\n boolean_param_list.append(False)\n else:\n boolean_param_list.append(True)\n\n if False in boolean_param_list:\n return {'msg': \"Incorrect Credentials\"}, 401\n else:\n access_token = self.auth_token_generate(\n identity_param_val=credentials[token_identity_param], expires_delta=expires_delta)\n return {'access_token': access_token}, 200\n except Exception as e:\n print(e)\n return {'msg': \"Incorrect Credentials\"}, 401", "def auth_user_session():\n if \"user\" in request.cookies:\n userid = request.cookies[\"user\"]\n if userid:\n user = User.query.filter(User.id == userid).first()\n if user:\n if \"session_cookie\" in request.cookies and user.cookie == request.cookies[\"session_cookie\"]:\n if user.cookie_expiration > datetime.now():\n return user\n\n # Return none if failure\n return None", "def get_credentials(scopes=None, secrets=None, storage=None, *,\n no_webserver=False):\n scopes = Scopes.get(scopes)\n\n if secrets is None:\n secrets = SECRETS\n if storage is None:\n storage = STORAGE\n\n secrets, storage = map(os.path.expanduser, (secrets, storage))\n\n store = file.Storage(storage)\n creds = store.get()\n\n if creds is None or creds.invalid:\n flow = client.flow_from_clientsecrets(secrets, scopes)\n args = ['--noauth_local_webserver'] if no_webserver else []\n flags = tools.argparser.parse_args(args)\n creds = tools.run_flow(flow, store, flags)\n\n return creds", "def authenticate():\n with open(APP_KEYS_FILE) as f:\n app_keys = json.load(f)\n storage = Storage(USER_OAUTH_DATA_FILE)\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(\n OAuth2WebServerFlow(\n client_id=app_keys['APP_CLIENT_ID'],\n client_secret=app_keys['APP_CLIENT_SECRET'],\n scope=['https://www.googleapis.com/auth/reminders'],\n user_agent='google reminders cli tool'),\n storage,\n )\n auth_http = credentials.authorize(httplib2.Http())\n return auth_http", "def wsfc_domain_credentials(self) -> Optional['outputs.WsfcDomainCredentialsResponse']:\n return pulumi.get(self, \"wsfc_domain_credentials\")", 
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'fb-drive.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\n if getattr(self, 'credentials', None):\n return self.credentials\n\n scopes = settings.SCOPES\n client_secret_file = settings.CLIENT_SECRET_FILE\n application_name = 'Google Sheets API Python Quickstart'\n\n home_dir = os.path.expanduser(settings.CREDENTIALS_DIRECTORY)\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(client_secret_file, scopes)\n flow.user_agent = application_name\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n # print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials() -> client.Credentials:\n\n credential_path = os.path.join(HOME_DIR, \"google-credentials.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(os.path.join(HOME_DIR, CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n # This attempts to open an authorization page in the default web browser, and asks the user\n # to grant the bot access to their data. 
If the user grants permission, the run_flow()\n # function returns new credentials.\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print(\"Storing credentials to \" + credential_path)", "def getCredentials(self):\n if self.result(): # Accepted?\n username = self.username_le.text()\n password = \"\"\n if self.askpassword:\n password = self.password_le.text()\n\n return username, password\n\n raise CredentialDialogReject()", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'gmail-python-spam-filter.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def oauth(self) -> OAuth2Session:\n if not self._oauth:\n raise OAuth2NotSetError(OAuth2NotSetError.ERROR_MSG)\n\n return self._oauth", "def oidc_credentials(self) -> pulumi.Output[Optional['outputs.GitHubWorkflowProfileResponseOidcCredentials']]:\n return pulumi.get(self, \"oidc_credentials\")", "def authenticate(self, request):\n if 'credentials' not in request.session:\n raise LoginRequired()\n self.credentials = client.OAuth2Credentials.from_json(\n request.session['credentials'])\n if self.credentials.access_token_expired:\n raise LoginRequired()", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n 
credential_path = os.path.join(credential_dir,\n 'admin-directory_v1-NestedGroupSync.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print 'Storing credentials to' + credential_path\n return credentials", "def verify_auth_token(cls, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return None\n user = User.query.get(data['id'])\n if user and user.session_token == token:\n return user\n return None", "def get_credentials():\n credential_dir = os.path.dirname(os.path.realpath(CLIENT_SECRET_FILE))\n credential_path = os.path.join(\n credential_dir, 'sheets.googleapis.com-endosys-events.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_appengine_credentials():\n return get_credentials()", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def getUsrCreds(self):\n import pickle\n import os.path\n from google_auth_oauthlib.flow import InstalledAppFlow\n from google.auth.transport.requests import Request\n\n # If modifying these scopes, delete the file token.pickle.\n SCOPES = ['https://www.googleapis.com/auth/calendar']\n\n self.creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as self.token:\n self.creds = pickle.load(self.token)\n # If there are no (valid) credentials available, let the user log in.\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n self.flow = InstalledAppFlow.from_client_secrets_file(\n 'client_secret.json', scopes = SCOPES)\n self.creds = self.flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as self.token:\n pickle.dump(self.creds, self.token)\n\n return self.creds", "def get_stored_token():\n try:\n parser = SafeConfigParser()\n parser.read(OAUTH_FILE)\n user = parser.get('auth', 'user')\n token = parser.get('auth', 'token')\n token_date_str = parser.get('auth', 'token_date')\n except ConfigParser.Error as e:\n return None, None\n\n if user and token and 
token_date_str:\n date1 = datetime.datetime.strptime(token_date_str, '%Y-%m-%d').date()\n date2 = datetime.date.today()\n if (date2 - date1).days > OAUTH_EXP_DAYS:\n user, token = None, None\n\n return user, token", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]", "def datasource_auth_credentials(self) -> Optional[pulumi.Input['SecretStoreBasedAuthCredentialsArgs']]:\n return pulumi.get(self, \"datasource_auth_credentials\")", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(servise: str) -> google.oauth2.credentials.Credentials:\n\n # SQL query to get the credentials for the current user from servise credentials table\n query = f\"\"\"\n SELECT token, token_uri, client_id, refresh_token, client_secret, scopes\n FROM {servise}_credentials\n WHERE user_id=?;\n \"\"\"\n\n # Get the credentials\n with connect(DATABASE) as db:\n credentials = db.execute(query, (session[\"user_id\"],)).fetchone()\n\n # Return None if it doesn't exist it the database\n if not credentials: return None\n\n # Transfer the credentials to a dictionary\n credentials_dict = {\n \"token\": credentials[0],\n \"token_uri\": credentials[1],\n \"client_id\": credentials[2],\n \"refresh_token\": credentials[3],\n \"client_secret\": credentials[4],\n \"scopes\": None if credentials[5] is None else credentials[5].split(\" \")\n }\n\n # Return a google Credentials object\n return google.oauth2.credentials.Credentials(**credentials_dict)", "def get_credentials():\n try:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n except ImportError:\n flags = None\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-quickstart.json')\n\n store = Storage(credential_path)\n 
credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(GoogleGsuiteAPI.CLIENT_SECRET_FILE, GoogleGsuiteAPI.SCOPES)\n flow.user_agent = GoogleGsuiteAPI.APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'grader.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args(args=[]))\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n #home_dir = os.path.expanduser('~')\n home_dir = os.path.expanduser('/home/pi/')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(resource_path(CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def acquire_oauth2_credentials():\n if os.path.isfile(\"%s/cre.json\" % file_path):\n f = open(\"%s/cre.json\" % file_path, \"r\")\n credentials = client.OAuth2Credentials.from_json(f.read())\n f.close()\n else: \n flow = client.flow_from_clientsecrets(\n \"%s/client_secrets.json\" % file_path,\n scope='https://www.googleapis.com/auth/analytics.readonly',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n auth_uri = flow.step1_get_authorize_url()\n webbrowser.open(auth_uri)\n auth_code = input('Enter the authentication code: ')\n credentials = flow.step2_exchange(auth_code)\n write_credentials(\"%s/cre.json\" % file_path, credentials)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 
'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials" ]
[ "0.8314805", "0.67917407", "0.66121364", "0.6458647", "0.644396", "0.6412883", "0.6292584", "0.6262902", "0.6216184", "0.6159604", "0.60996187", "0.60749036", "0.60712475", "0.5996059", "0.5910496", "0.59005594", "0.5890843", "0.5890843", "0.5890843", "0.5890843", "0.5890843", "0.5879354", "0.5873639", "0.5870393", "0.5835825", "0.5814248", "0.5809308", "0.58041394", "0.5768607", "0.575834", "0.57529396", "0.5750908", "0.5741193", "0.57327014", "0.5717622", "0.569523", "0.56876564", "0.56876564", "0.56844217", "0.5678284", "0.5672838", "0.5668415", "0.5656732", "0.5655918", "0.56520855", "0.5644483", "0.5625095", "0.56227195", "0.5605515", "0.55984133", "0.5598003", "0.55888927", "0.55838805", "0.5582812", "0.55816144", "0.55756366", "0.5572468", "0.5570141", "0.55692136", "0.5552041", "0.5542717", "0.55422866", "0.5541016", "0.553958", "0.5535191", "0.5530885", "0.55217344", "0.55175555", "0.55035555", "0.54987884", "0.5494122", "0.54877883", "0.54858947", "0.548373", "0.54786056", "0.5466669", "0.5466669", "0.54603946", "0.5459983", "0.54573584", "0.54559153", "0.54558057", "0.5430749", "0.54245627", "0.541452", "0.5410776", "0.5405051", "0.5404881", "0.5394813", "0.5391549", "0.53841615", "0.5380497", "0.5379015", "0.5377957", "0.5370195", "0.5362973", "0.53615457", "0.53560424", "0.53560424" ]
0.83706456
1
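The credential helpers in the record that closes above differ mainly in storage paths and client-secret file names; they all repeat the same legacy oauth2client pattern of loading a cached token and falling back to a browser flow. A minimal sketch of that shared pattern, assuming the deprecated oauth2client and google-api-python-client libraries and hypothetical file names (client_secret.json, tool-credentials.json):

import os

import httplib2
from googleapiclient import discovery
from oauth2client import client, tools
from oauth2client.file import Storage

SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'  # assumed scope


def get_cached_credentials():
    """Load cached OAuth2 credentials, running the browser flow only when needed."""
    credential_dir = os.path.join(os.path.expanduser('~'), '.credentials')
    os.makedirs(credential_dir, exist_ok=True)
    store = Storage(os.path.join(credential_dir, 'tool-credentials.json'))

    credentials = store.get()
    if not credentials or credentials.invalid:
        # First run (or revoked token): run the client-secrets flow and cache the result.
        flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
        flow.user_agent = 'example-tool'  # hypothetical application name
        credentials = tools.run_flow(flow, store)
    return credentials


def get_calendar_service():
    http = get_cached_credentials().authorize(httplib2.Http())
    return discovery.build('calendar', 'v3', http=http)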
We need a Google calendar 'service' object to obtain list of calendars, busy times, etc. This requires authorization. If authorization is already in effect, we'll just return with the authorization. Otherwise, control flow will be interrupted by authorization, and we'll end up redirected back to /choose without a service object. Then the second call will succeed without additional authorization.
def get_gcal_service(credentials):\n    app.logger.debug("Entering get_gcal_service")\n    http_auth = credentials.authorize(httplib2.Http())\n    service = discovery.build('calendar', 'v3', http=http_auth)\n    app.logger.debug("Returning service")\n    return service
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate_google():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n return service", "def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n plusService = discovery.build('plus', 'v1', http=http_auth)\n app.logger.debug(\"Returning service\")\n return [service, plusService]", "def get_service():\n \n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n token_path = f\"{sys.path[0]}/creds/token.pickle\"\n if os.path.exists(token_path):\n with open(token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n cred_path = f\"{sys.path[0]}/creds/credentials.json\"\n flow = InstalledAppFlow.from_client_secrets_file(\n cred_path, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n return service", "def main():\r\n creds = None\r\n # The file token.json stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('cal_token.json'):\r\n creds = Credentials.from_authorized_user_file('cal_token.json', SCOPES)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'client_secret.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('cal_token.json', 'w') as token:\r\n token.write(creds.to_json())\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n return service", "def authorize_api(self):\n\n log.debug('computing Google authentification process for \"{}\"'.format(self.school_year))\n flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, SCOPE)\n storage = Storage('credentials.dat')\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, tools.argparser.parse_args())\n\n # Create an httplib2.Http object to handle our HTTP requests, and authorize it\n # using the credentials.authorize() function.\n http = httplib2.Http()\n http = credentials.authorize(http)\n httplib2.debuglevel = 0\n\n return 
build('calendar', 'v3', http=http)", "def func_calendar_list():\r\n creds = None\r\n global page_token\r\n #global new_calendar_list=[]\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\r\n new_calendar_list = []\r\n for calendar_list_entry in calendar_list['items']:\r\n new_calendar_list.append(calendar_list_entry['summary'])\r\n page_token = calendar_list.get('nextPageToken')\r\n return (new_calendar_list)", "def get_cal_events(user, calservice):\r\n cal_page_token = None\r\n while True:\r\n try:\r\n #the next for loop retrives the calendar events\r\n #list to be checked for matching criteria\r\n prieml = user['primaryEmail']\r\n creator_to_del = '[email protected]'\r\n event_to_del = 'Digital Directorate Team Meeting'\r\n events = calservice.events().list(calendarId=prieml,\r\n pageToken=cal_page_token).execute()\r\n for event in events['items']:\r\n if event['status'] != 'cancelled':\r\n try:\r\n #this is the criteri to be checked against\r\n organiser = event['organizer']['email']\r\n summary = event['summary']\r\n if organiser == creator_to_del \\\r\n and summary == event_to_del:\r\n try:\r\n #checking for specific start date \r\n #in the event some events have different\r\n #dateTime\\date keywords\r\n if event['start']['dateTime']:\r\n evdate = event['start']['dateTime']\r\n startDate = datetime.strptime(evdate[0:10],\r\n '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate[0:10]))\r\n except KeyError:\r\n #if the keyword is not dateTime \r\n #then fetch date keyword\r\n evdate = event['start']['date']\r\n startDate = datetime.strptime(evdate, '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate))\r\n except KeyError:\r\n continue\r\n cal_page_token = events.get('nextPageToken')\r\n if not cal_page_token:\r\n break\r\n except ValueError:\r\n print('Oops! Thhe last event has an error. 
Try again...')", "def list_calendars(service):\n app.logger.debug(\"Entering list_calendars with service\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n app.logger.debug(\"Got calendar list\")\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal:\n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n\n result.append(\n {\"kind\": kind, \"id\": id, \"summary\": summary, \"selected\": selected,\n \"primary\": primary})\n app.logger.debug(\"About to return from list_calendars with: \", result)\n return sorted(result, key=cal_sort_key)", "def main(username):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(f\"{os.environ['HOME']}/.config/.clinic/.tokens/{username}.pickle\"):\n with open(f\"{os.environ['HOME']}/.config/.clinic/.tokens/{username}.pickle\",'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(f\"{os.environ['HOME']}/.config/.clinic/credentials.json\"\n , SCOPES)\n creds = flow.run_local_server(port=0)\n # with open(username + \".pickle\", \"wb\") as token:\n with open(f\"{os.environ['HOME']}/.config/.clinic/.tokens/{username}.pickle\",'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n return service", "def list_calendars(service):\n app.logger.debug(\"Entering list_calendars\") \n calendar_list = service.calendarList().list().execute()[\"items\"]\n result = [ ]\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal: \n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n \n\n result.append(\n { \"kind\": kind,\n \"id\": id,\n \"summary\": summary,\n \"selected\": selected,\n \"primary\": primary\n })\n return sorted(result, key=cal_sort_key)", "async def api_get_access(service_id, request: Request):\n service = await get_service(service_id)\n if service:\n redirect_uri = await get_service_redirect_uri(request, service_id)\n params = {\n \"response_type\": \"code\",\n \"client_id\": service.client_id,\n \"redirect_uri\": redirect_uri,\n \"scope\": \"donations.create\",\n \"state\": service.state,\n }\n endpoint_url = \"https://streamlabs.com/api/v1.0/authorize/?\"\n querystring = \"&\".join([f\"{key}={value}\" for key, value in params.items()])\n redirect_url = endpoint_url + querystring\n return RedirectResponse(redirect_url)\n else:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST, detail=\"Service does not exist!\"\n )", "def cal(userEvent):\n\n # def cal(userEvent):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are 
no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n\n\n# ------------- Calling the calendar --------------\n\n # print('Printing events from', cal_name)\n\n events_result = service.events().list(calendarId=calid, timeMin=now,\n maxResults=100, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n\n# ---------------- grabing old events ALL EVENTS ----------------------\n# prior to deletion if parsing is succefull\n old_list = []\n\n for old_event in events:\n # print(calendarId)\n # service.events().delete(calendarId=cal_id,\n # eventId=event['id']).execute()\n\n old_event = service.events().get(\n calendarId=cal_id, eventId=old_event['id']).execute()\n\n old_list.append(old_event['id'])\n\n # print(old_list)\n\n\n# ---------------- Creating EVENTS ----------------------\n\n # event est une variable puisée dans json file (deprecated INFO)\n # event = event_json (deprecated INFO)\n\n for k in range(len(userEvent)):\n\n try:\n\n userEvent[k] = service.events().insert(\n calendarId=cal_id, body=userEvent[k]).execute()\n\n deleteGO = True\n\n except HttpError as err:\n\n # print(sys.exc_info()[1])\n\n if err.resp.status in [400, 404]:\n\n if err.resp.get('content-type', '').startswith('application/json'):\n reason = json.loads(err.content).get(\n 'error').get('errors')[0].get('message')\n\n print('\\n', \"Veuillez remplir tous les champs\")\n print(\"L'événement comportant l'erreur a été ignoré\")\n exit()\n # print(reason)\n\n # service.events().delete(calendarId=cal_id,\n # eventId=userEvent[k]).execute()\n\n deleteGO = False\n\n\n# ---------------- Printing EVENTS ----------------------\n# TB Shoot les events apparaissent avec un délai\n # print(events)\n # if not events:\n # print('No upcoming events found.')\n # for event in events:\n # start = event['start'].get('dateTime', event['start'].get('date'))\n # print(start, event['summary'])\n\n# ---------------- deleting old EVENTS ----------------------\n\n if deleteGO:\n\n for event2go in old_list:\n\n service.events().delete(calendarId=cal_id,\n eventId=event2go).execute()", "def service_authentication():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n return service", "def get_gcal_events(service, from_time):\n\n # The list() method returns a dict containing 
various metadata along with the actual calendar entries (if any). \n # It is not guaranteed to return all available events in a single call, and so may need called multiple times\n # until it indicates no more events are available, signalled by the absence of \"nextPageToken\" in the result dict\n\n logger.debug('Retrieving Google Calendar events')\n\n # make an initial call, if this returns all events we don't need to do anything else,,,\n eventsResult = service.events().list(calendarId=CALENDAR_ID, \n timeMin=from_time, \n singleEvents=True, \n orderBy='startTime', \n showDeleted=True).execute()\n\n events = eventsResult.get('items', [])\n # if nextPageToken is NOT in the dict, this should be everything\n if 'nextPageToken' not in eventsResult:\n logger.info('> Found {:d} upcoming events in Google Calendar (single page)'.format(len(events)))\n return events\n\n # otherwise keep calling the method, passing back the nextPageToken each time\n while 'nextPageToken' in eventsResult:\n token = eventsResult['nextPageToken']\n eventsResult = service.events().list(calendarId=CALENDAR_ID, \n timeMin=from_time, \n pageToken=token, \n singleEvents=True, \n orderBy='startTime', \n showDeleted=True).execute()\n newevents = eventsResult.get('items', [])\n events.extend(newevents)\n logger.debug('> Found {:d} events on new page, {:d} total'.format(len(newevents), len(events)))\n \n logger.info('> Found {:d} upcoming events in Google Calendar (multi page)'.format(len(events)))\n return events", "def test_get_calendar(self):\n url, parsed = self.prepare_urls(\n 'v1:activity-calendar', subdomain=self.company.subdomain)\n \n response = self.client.post(url, {'dt': timezone.now()}, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, {'dt': timezone.now()}, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n content = json.loads(response.content)\n self.assertTrue(content.has_key('calendar_data'))", "def get_service(credentials):\n try:\n creds = service_account.Credentials.from_service_account_file(\n credentials, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=creds)\n drive_service = build('drive', 'v3', credentials=creds)\n return service, drive_service\n except Exception as e:\n print(f'Error accessing Google Drive with service account '\n f'{credentials}')\n raise(e)", "def get_client(self):\n token = self.get_access_token()\n if self.client is None:\n credentials = AccessTokenCredentials(token, 'vetware/1.0')\n # credentials = SignedJwtAssertionCredentials(self.email, self.private_key,\n # \"https://www.googleapis.com/auth/calendar\")\n http = credentials.authorize(Http())\n self.client = build('calendar', 'v3', http=http)\n return self.client", "def __init__(self):\n # If modifying these scopes, delete the file token.pickle.\n self.parse_args()\n self.creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n self.creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 
self.args[\"credentials\"], SCOPES)\n self.creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(self.creds.to_json())\n\n self.service = build('calendar', 'v3', credentials=self.creds)\n self.get_calendars()\n self.choose_calendar()", "def readGoogleCal(self):\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n # Call the Calendar API\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n events_result = service.events().list(calendarId='primary', timeMin=now,\r\n maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n\r\n dateVar, timeVar = start.split('T')\r\n eventVar = event['summary']\r\n\r\n self.calDate.append(dateVar)\r\n self.calTime.append(timeVar)\r\n self.calEvent.append(eventVar)\r\n #print(calDate[count]+' ' + calTime[count] + ' ' +calEvent[count])\r", "def getService():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n PATH_TO_CREDENTIALS, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n return service;", "def login_with_google():\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = 
credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print \"Token's client ID does not match app's.\"\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n print \"data from oauth: \" + str(data)\n login_session['email'] = data['email']\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n user_id = get_user_id(login_session['email'])\n # create new user if not found in database\n if not user_id:\n user_id = new_user(login_session)\n login_session['user_id'] = user_id\n\n output = ''\n output += '<h1>Welcome, '\n output += login_session['username']\n output += '!</h1>'\n output += '<img src=\\\"'\n output += login_session['picture']\n output += ' \\\" ' \\\n 'style = \\\"width: 300px; height: 300px;border-radius: 150px;' \\\n '-webkit-border-radius: 150px;-moz-border-radius: 150px;\\\"> '\n flash(\"you are now logged in as %s\" % login_session['username'])\n print \"done!\"\n return output", "def buildAPICal(self, credentials):\n from googleapiclient.discovery import build\n return build('calendar', 'v3', credentials=self.creds)", "def set_api_credentials(self):\n SCOPES = 'https://www.googleapis.com/auth/calendar'\n store = file.Storage('credentials.json')\n credentials = store.get()\n\n if not credentials or credentials.invalid:\n # Create a flow object. This object holds the client_id,\n # client_secret, and\n # SCOPES. 
It assists with OAuth 2.0 steps to get user\n # authorization and credentials.\n flow = OAuth2WebServerFlow(\n os.getenv('OOATH2_CLIENT_ID'),\n os.getenv('OOATH2_CLIENT_SECRET'),\n SCOPES)\n credentials = tools.run_flow(flow, store)\n api_key = os.getenv('API_KEY')\n service = build('calendar', 'v3', developerKey=api_key,\n http=credentials.authorize(Http()))\n return service", "def __init__(self, email, password):\n\n self.cal_client = gdata.calendar.client.CalendarClient(source='Google-Calendar_Python_Sample-1.0')\n self.cal_client.ClientLogin(email, password, self.cal_client.source);", "def authorize(config, flags):\n try:\n credentials = client.GoogleCredentials.get_application_default()\n print('Using application default credentials.')\n return credentials.create_scoped(_constants.API_SCOPE)\n except client.ApplicationDefaultCredentialsError:\n pass # Can safely ignore this error, since it just means none were found.\n if os.path.isfile(_constants.SERVICE_ACCOUNT_FILE):\n print('Using service account credentials from %s.' %\n _constants.SERVICE_ACCOUNT_FILE)\n return ServiceAccountCredentials.from_json_keyfile_name(\n _constants.SERVICE_ACCOUNT_FILE,\n scopes=_constants.API_SCOPE)\n elif os.path.isfile(_constants.CLIENT_SECRETS_FILE):\n print('Using OAuth2 client secrets from %s.' %\n _constants.CLIENT_SECRETS_FILE)\n message = tools.message_if_missing(_constants.CLIENT_SECRETS_FILE)\n storage = token_storage.Storage(config)\n credentials = storage.get()\n if credentials is not None and not credentials.invalid:\n return credentials\n message = tools.message_if_missing(_constants.CLIENT_SECRETS_FILE)\n flow = client.flow_from_clientsecrets(\n _constants.CLIENT_SECRETS_FILE,\n scope=_constants.API_SCOPE,\n message=message,\n login_hint=config['emailAddress'])\n return tools.run_flow(flow, storage, flags)\n print('No OAuth2 authentication files found. 
Checked:', file=sys.stderr)\n print('- Google Application Default Credentials', file=sys.stderr)\n print('- %s' % _constants.SERVICE_ACCOUNT_FILE, file=sys.stderr)\n print('- %s' % _constants.CLIENT_SECRETS_FILE, file=sys.stderr)\n print('Please read the accompanying documentation.', file=sys.stderr)\n sys.exit(1)\n return None", "def get_service():\n creds = None\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n\n service = build('gmail', 'v1', credentials=creds)\n\n return service", "def get_credentials(service, sandbox=True):\n srv = service.lower()\n srv_param = resolve_service(srv)\n if srv_param is None:\n return\n\n client_id, client_secret, scope, storage = srv_param\n if srv == 'evernote':\n return evernote_auth(client_id, client_secret, storage, sandbox)\n else:\n return google_auth(client_id, client_secret, scope, storage)", "def getUsrCals(self, service):\n return self.service.calendarList().list().execute()", "def get_credentials():\n scope = ['https://www.googleapis.com/auth/adsense.readonly',\n 'https://www.googleapis.com/auth/analytics.readonly']\n\n #get your client secret file\n cwd = os.getcwd()\n pathToFile = os.path.join(cwd,\n 'YOURCLIENTSECRETPATH.json')\n print \"This is your client secret path:\",pathToFile\n\n #first part of the folow process\n #https://developers.google.com/api-client-library/python/guide/aaa_oauth\n flow = oauth2client.client.flow_from_clientsecrets(pathToFile,scope,redirect_uri='urn:ietf:wg:oauth:2.0:oob')#'urn:ietf:wg:oauth:2.0:oob'\n \n #check to see if you have something already\n storage = oauth2client.file.Storage('creds.dat') #this is a made up file name\n credentials = storage.get()\n \n #if they dont exist already go ahead and get them\n if not credentials or credentials.invalid:\n #get authorization url\n auth_url = flow.step1_get_authorize_url()\n #open the url to get a code\n webbrowser.open(auth_url)\n\n #enter the code to reauth\n codeStr = str(raw_input('enter code here:'))\n credentials = flow.step2_exchange(codeStr)\n #save the code to the dat\n storage = oauth2client.file.Storage('creds.dat')\n storage.put(credentials)\n \n return credentials\n\n else:\n return credentials", "def check_authorization(*services):\n def wrapper(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n logger.info('Decorator check authorization')\n for service in services:\n if STATE[service] is not None:\n continue\n else:\n logger.info('Start authorization process')\n logger.info(service)\n logger.info(CREDENTIALS[service]['client_id'])\n remote = build_remote_app(CREDENTIALS[service]['client_id'],\n CREDENTIALS[service]['client_secret'],\n {'scope': SERVICES[service]['SCOPES'], 'jwt-bearer': JWT}, service)\n PENDING_AUTHORIZATION[service] = remote\n logger.info('Sending authorization request for service {}'.format(service))\n return remote.authorize(callback=SERVICES[service]['CALLBACK'])\n return 
f(*args, **kwargs)\n return decorated\n return wrapper", "def get_api_credentials(scope, service_account=True):\n\tSTORAGE = file.Storage('oAuth2.json') #local storage of oAuth tokens\n\tcredentials = STORAGE.get()\n\tif credentials is None or credentials.invalid: #check if new oAuth flow is needed\n\t\tif service_account: #server 2 server flow\n##\t\t\twith open(SERVICE_ACCOUNT_FILE) as f:\n##\t\t\t\taccount = json.loads(f.read())\n##\t\t\t\temail = account['client_email']\n##\t\t\t\tkey = account['private_key']\n\t\t\tcredentials = ServiceAccountCredentials.from_json_keyfile_name(SERVICE_ACCOUNT_FILE, scope)\n##\t\t\tcredentials = client.SignedJwtAssertionCredentials(email, key, scope=scope)\n\t\t\tSTORAGE.put(credentials)\n\t\telse: #Application Default Credentials (ADC)\n\t\t\tcredentials = GoogleCredentials.get_application_default()\n\t\t\treturn discovery.build('vision', 'v1', credentials=credentials,\n discoveryServiceUrl=DISCOVERY_URL)\t \n##\t\telse: #normal oAuth2 flow\n##\t\t\tCLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')\n##\t\t\tFLOW = client.flow_from_clientsecrets(CLIENT_SECRETS, scope=scope)\n##\t\t\tPARSER = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser])\n##\t\t\tFLAGS = PARSER.parse_args(sys.argv[1:])\n##\t\t\tcredentials = tools.run_flow(FLOW, STORAGE, FLAGS)\n\t\t\n\treturn credentials", "def test_get_with_service(self):\n HostingServiceAccount.objects.create(\n service_name='googlecode',\n username='bob')\n\n account = HostingServiceAccount.objects.create(\n service_name='github',\n username='bob')\n\n rsp = self.api_get(\n get_hosting_service_account_list_url(),\n data={'service': 'github'},\n expected_mimetype=hosting_service_account_list_mimetype)\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(len(rsp['hosting_service_accounts']), 1)\n self.compare_item(rsp['hosting_service_accounts'][0], account)", "def __init__(self, credentials):\n self.credentials = credentials\n http = httplib2.Http()\n http = self.credentials.authorize(http)\n self.service = build(\"drive\", \"v2\", http=http)", "def init_api(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(self.gdrive_config.TOKEN_PICK_PATH):\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.gdrive_config.CREDENTIAL_PATH, self.gdrive_config.SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def watch_namespaced_o_auth_client_authorization_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_o_auth_client_authorization_list\" % key\n )\n params[key] = 
val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/oauthclientauthorizations'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_service(credentials_folder, version='v3'):\n credentials = get_credentials(credentials_folder)\n http = credentials.authorize(httplib2.Http(cache=\".cache\"))\n service = discovery.build('drive', version, http=http)\n return service", "def go():\n # Authenticate\n print('****************** Authenticate ******************')\n\n creds = None\n\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n print('****************** Load Token ******************')\n\n creds = pickle.load(token)\n\n if not creds or not creds.valid:\n print('****************** Credentials ******************')\n\n if creds and creds.expired and creds.refresh_token:\n print('****************** Refresh Credentials ******************')\n\n creds.refresh(Request())\n else:\n print('****************** Load Credentials ******************')\n\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n\n with open('token.pickle', 'wb') as token:\n print('****************** Dump Token ******************')\n\n pickle.dump(creds, token)\n\n print('****************** Load Service ******************')\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Set Date Range\n print('****************** Set Date Range ******************')\n \n start_datetime = datetime.today() - timedelta(days=2)\n end_datetime = datetime.today() + timedelta(days=2)\n\n start_date = start_datetime.strftime(\"%Y/%m/%d\")\n end_date = end_datetime.strftime(\"%Y/%m/%d\")\n\n print(start_date)\n print(end_date)\n\n # Set Query\n print('****************** Set Query ******************')\n\n user_id = 'me'\n full = 'full'\n query = 'after:' + start_date + ' before:' + end_date + ' subject:Your Single Transaction Alert from Chase'\n\n print(query)\n\n # List Messages (All Pages)\n print('****************** Run Query ******************')\n\n response = service.users().messages().list(userId=user_id, q=query).execute()\n\n messages_all_pages = []\n\n if 'messages' in response:\n messages_all_pages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n 
page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()\n messages_all_pages.extend(response['messages'])\n\n messages = messages_all_pages\n\n # Find Transactions in Message List\n if not messages:\n print('No messages found...')\n else:\n for message in messages:\n queue_id = message['id']\n\n # Get Message\n this_message = service.users().messages().get(userId=user_id, id=queue_id, format=full).execute()\n\n # Set Message\n message_body = this_message['payload']['body']['data']\n message_html = base64.urlsafe_b64decode(message_body)\n message_text = message_html.decode('utf-8').replace('($USD) ', '')\n\n # Set Transaction Date\n date_message = int(this_message['internalDate'])\n date_object = (date_message / 1000)\n transaction_date = datetime.fromtimestamp(date_object).strftime(\"%Y-%m-%d\")\n\n # Set Amount\n amount = re.search('A charge of (.+?) at', message_text).group(1)\n\n # Set Description\n description = re.search('at (.+?) has', message_text).group(1)\n\n # Build Transaction\n transaction = {\n 'QueueID': queue_id,\n 'TransactionTypeID': 2,\n 'TransactionDT': transaction_date,\n 'Description': description,\n 'Amount': amount,\n 'BudgetCategoryID': '103',\n 'TransactionNumber': '',\n 'Note': 'CC'\n }\n\n print('****************** Transaction Found ******************')\n print(transaction)\n\n # Send to Queue\n response_data = requests.post(url=BUDGET_API, data=transaction)\n\n result = response_data.text\n\n if result == '1':\n print('****************** Transaction Queued ******************')", "def test_authorization_not_needed_get(self):\n new_client = APIClient()\n res = new_client.get('/events/', kwargs={'pk': 3}, format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def main(days_prior=None, print_visible=True, target_calendar=None):\n\n # Fetcher:\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Get a list of all my Google Calendars\n page_token = None\n calendar_list = None\n while True:\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\n page_token = calendar_list.get('nextPageToken')\n if not page_token:\n break\n\n # Set up Logger\n # using the keyword `with` means the .csv will automatically close\n today = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n\n day_to_log = None\n if days_prior is None:\n if len(sys.argv) == 1:\n # by default, we log all of yesterday's events\n days_prior = 1\n elif len(sys.argv) == 2:\n days_prior = int(sys.argv[1])\n else:\n print('Invalid combination or number of CLI arguments. No calendar data pulled. 
Exiting script.')\n sys.exit()\n\n day_to_log = today - datetime.timedelta(days=days_prior)\n time_tracking_file = day_to_log.isoformat()\n with open(f'/home/mhtl/Projects/automated-timetracking/' +\n f'timetracking_data/{time_tracking_file}.csv', 'w') as f:\n # Start CSV heading\n new_row = f'start,end,summary,calendar\\n'\n f.write(new_row)\n\n # Call the Calendar API\n timezone_offset = '-04:00' # because of Toronto timezone with respect to UTC\n day_logged = None\n if len(sys.argv) == 1:\n day_logged = 'yesterday'\n else:\n day_logged = day_to_log.strftime('%A, %B %d, %Y')\n if print_visible:\n print(f'Getting all visible events from {day_logged}.')\n\n calendar_list['items'] = [c for c in calendar_list['items'] if 'selected' in c.keys()]\n calendar_list['items'].sort(key=lambda c: c.get('summary', ''))\n for counter, calendar_list_entry in enumerate(calendar_list['items']):\n if print_visible:\n print(f'{counter+1} out of {len(calendar_list[\"items\"])} calendars appended. ' +\n f'Calendar Name: {calendar_list_entry[\"summary\"]}')\n events_result = service.events().list(calendarId=calendar_list_entry['id'],\n timeMin=day_to_log.isoformat() + timezone_offset,\n timeMax=today.isoformat() + timezone_offset,\n maxResults=2500, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n # Add to CSV via Logger\n if not events:\n pass\n # print('No events from requested day found.')\n else:\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n end = event['end'].get('dateTime', event['end'].get('date'))\n # swap commas with semi-colons so values don't get misinterpreted as different fields\n summary = event.get(\"summary\", '').replace(',', ';')\n calendar = calendar_list_entry.get(\"summary\", '').replace(',', ';')\n new_row = f'{start},{end},{summary},{calendar}\\n'\n f.write(new_row)\n if print_visible:\n print(f'Done logging to {time_tracking_file}.csv!')\n print(f'Logging completed at {datetime.datetime.today().strftime(\"%I:%M%p, %A, %B %d, %Y\")}')", "def get_google_service_account(cls, handler):\n\n if roles.Roles.is_super_admin():\n # ?tab= for v1.9, ?action= for v1.8\n exit_url = '%s?tab=google_service_account' % handler.LINK_URL\n else:\n exit_url = cls.request.referer\n rest_url = GoogleServiceAccountRESTHandler.URI\n\n template_values = {}\n template_values['page_title'] = handler.format_title(\n 'Google Service Accounts')\n\n content = safe_dom.NodeList()\n edit_google_service_account_action = (\n base.GoogleServiceAccountBase.\n DASHBOARD_EDIT_SERVICE_ACCOUNT_ACTION)\n\n for name, key in (service_account_models.GoogleServiceAccountTypes.\n to_dict().iteritems()):\n content.append(\n safe_dom.Element(\n 'a', id=edit_google_service_account_action,\n className='gcb-button gcb-pull-right', role='button',\n style='margin: 5px',\n href='%s?action=%s&key=%s&credential_type=%s' % (\n handler.LINK_URL, edit_google_service_account_action,\n key, key)\n ).add_text('Add/Edit %s object' % name)\n )\n\n # Title - Default Settings\n content.append(\n safe_dom.Element('h3').add_text('Default Settings')\n )\n\n # Table - Default Settings\n table_div = safe_dom.Element(\n 'div', style='width: 100%; overflow: scroll; margin-top: 10px;')\n table = safe_dom.Element('table')\n table_div.add_child(table)\n content.append(table_div)\n\n table_heading = safe_dom.Element('tr')\n for attr in cls.TABLE_HEADING_LIST:\n table_heading.add_child(\n safe_dom.Element('th').add_text(attr))\n\n # table_heading.add_child(\n # 
safe_dom.Element('th').add_text('Edit Link'))\n\n table.add_child(table_heading)\n\n all_settings = (\n google_service_account.GoogleServiceManager.\n get_all_default_settings())\n\n # TODO(rthakker) Add support for namespaces from course list etc\n # later on\n for entity in all_settings:\n tr = safe_dom.Element('tr')\n table.add_child(tr)\n args = {\n 'action': edit_google_service_account_action,\n 'key': entity.id,\n }\n\n for attr in cls.TABLE_HEADING_LIST:\n tr.add_child(safe_dom.Element('td').add_text(\n getattr(entity, attr)\n ))\n\n # href = '%s?%s' % (handler.LINK_URL, urllib.urlencode(args))\n # link = safe_dom.Element(\n # 'a', href=href, type='button', className='gcb-button'\n # ).add_text('Edit')\n # edit_td = safe_dom.Element('td')\n # edit_td.add_child(link)\n # tr.add_child(edit_td)\n\n\n content.append(\n safe_dom.Element('p').add_text('Total: %d' % len(all_settings))\n )\n template_values['main_content'] = content\n handler.render_page(template_values)", "def google_drive_authenticate(self):", "def get_tasks():\n outbound_tasks = []\n outbound_tasks_with_due_dates = []\n creds = None\n current_path = os.path.dirname(os.path.abspath(__file__))\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n picked_token_path = current_path + '/token.pickle'\n print(picked_token_path)\n if os.path.exists(picked_token_path):\n with open(picked_token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n current_path + '/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(picked_token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('tasks', 'v1', credentials=creds,\n cache=DiscoveryCache()) # https://github.com/googleapis/google-api-python-client/issues/325\n\n # Call the Tasks API\n tasks = service.tasks().list(tasklist='@default').execute()\n\n for task in tasks['items']:\n reduced = task_reducer(task)\n if reduced is not None:\n if 'due' in reduced:\n outbound_tasks_with_due_dates.append(reduced)\n else:\n outbound_tasks.append(reduced)\n\n outbound_tasks_with_due_dates.sort(key=sort_by_due_date)\n outbound_tasks[:0] = outbound_tasks_with_due_dates\n\n return outbound_tasks", "def gconnect():\n\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Obtain authorization code\n code = request.data\n try:\n\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('go_client_secrets.json',\n scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' %\n access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n\n # If there was an error in the 
access token, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 50)\n response.headers['Content-Type'] = 'application/json'\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != GO_CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print \"Token's client ID does not match app's.\"\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use, and set provider.\n login_session['access_token'] = credentials.access_token\n login_session['provider'] = \"Google\"\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # Check if user is already in DB\n user_id = getUserID(login_session['email'])\n\n # If not, create the user in the DB based on the login session\n if not user_id:\n user_id = createUser(login_session)\n # Finally add the user id info to the login session information\n login_session['user_id'] = user_id\n\n output = ''\n output += '<h1>Welcome, '\n output += login_session['username']\n output += '!</h1>'\n output += '<img src=\"'\n output += login_session['picture']\n output += ' \" style = \"width: 300px; height: 300px;border-radius: 150px;'\n output += '-webkit-border-radius: 150px;-moz-border-radius: 150px;\"> '\n flash(\"you are now logged in as %s\" % login_session['username'])\n print \"done!\"\n\n return output", "def gconnect():\r\n # Validate state token for CSFP\r\n if request.args.get('state') != login_session['state']:\r\n response = make_response(json.dumps('Invalid state parameter.'), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Get authorization code from client\r\n code = request.data\r\n\r\n try:\r\n # Get access token using client auth code and app creds\r\n oauth_flow = flow_from_clientsecrets('google_client_secret.json',\r\n scope='')\r\n oauth_flow.redirect_uri = 'postmessage'\r\n credentials = oauth_flow.step2_exchange(code)\r\n except FlowExchangeError:\r\n response = make_response(\r\n json.dumps('Failed to get the access token.'), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Check that the access token is valid.\r\n access_token = credentials.access_token\r\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\r\n % access_token)\r\n h = httplib2.Http()\r\n result = json.loads(h.request(url, 'GET')[1])\r\n # If there was an error in the access 
token info, abort.\r\n if result.get('error') is not None:\r\n response = make_response(json.dumps(result.get('error')), 500)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Verify that the access token is used for the intended user.\r\n gplus_id = credentials.id_token['sub']\r\n if result['user_id'] != gplus_id:\r\n response = make_response(\r\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Verify that the access token is valid for this app.\r\n if result['issued_to'] != GOOGLE_CLIENT_ID:\r\n response = make_response(\r\n json.dumps(\"Token's client ID does not match app's.\"), 401)\r\n print(\"Token's client ID does not match app's.\")\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n stored_access_token = login_session.get('access_token')\r\n stored_gplus_id = login_session.get('gplus_id')\r\n if stored_access_token is not None and gplus_id == stored_gplus_id:\r\n response = make_response(\r\n json.dumps('Current user is already connected.'), 200)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Store the access token in the session for later use.\r\n login_session['access_token'] = credentials.access_token\r\n login_session['gplus_id'] = gplus_id\r\n\r\n # Get user info\r\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\r\n params = {'access_token': credentials.access_token, 'alt': 'json'}\r\n answer = requests.get(userinfo_url, params=params)\r\n\r\n data = answer.json()\r\n login_session['username'] = data['name']\r\n login_session['picture'] = data['picture']\r\n login_session['email'] = data['email']\r\n\r\n # See if user exists, if it doesn't make a new one\r\n user_id = getUserID(login_session['email'])\r\n if not user_id:\r\n username = login_session['username']\r\n if username is None or len(username) == 0:\r\n username = login_session['email'].split('@')[0]\r\n login_session['username'] = username\r\n user_id = createUser(login_session)\r\n else:\r\n login_session['username'] = getUserInfo(user_id).name\r\n\r\n login_session['user_id'] = user_id\r\n\r\n # welcome message\r\n output = ''\r\n output += '<h1>Welcome, '\r\n output += login_session['username']\r\n output += '!</h1>'\r\n output += '<img src=\"'\r\n output += login_session['picture']\r\n output += ' \" style = \"width: 300px; height: 300px;border-radius: 150px;\\\r\n -webkit-border-radius: 150px;-moz-border-radius: 150px;\"> '\r\n flash(\"you are now logged in as %s\" % login_session['username'])\r\n print('done!')\r\n return output", "def oauth2callbackmeeting():\n app.logger.debug(\"Entering oauth2callback meeting\")\n if(isMain):\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRET_FILE,\n scope=SCOPES,\n redirect_uri=flask.url_for('oauth2callbackmeeting', _external=True))\n else:\n \t# from Heroku, a clientID and client secrets are needed for OAuth.\n \t# Normally these are taken from client_secrets.json, \n \t# but they can be manually entered, eliminating the need for the .json file\n flow = OAuth2WebServerFlow(client_id=clientId,\n client_secret=clientSecret,\n scope=SCOPES,\n redirect_uri=flask.url_for('oauth2callbackmeeting', _external=True))\n\n # Note we are *not* redirecting above. We are noting *where*\n # we will redirect to, which is this function.\n\n # The *second* time we enter here, it's a callback\n # with 'code' set in the URL parameter. 
If we don't\n # see that, it must be the first time through, so we\n # need to do step 1.\n app.logger.debug(\"Got flow meeting\")\n if 'code' not in flask.request.args:\n app.logger.debug(\"Code not in flask.request.args meeting\")\n auth_uri = flow.step1_get_authorize_url()\n return flask.redirect(auth_uri)\n # This will redirect back here, but the second time through\n # we'll have the 'code' parameter set\n else:\n # It's the second time through ... we can tell because\n # we got the 'code' argument in the URL.\n app.logger.debug(\"Code was in flask.request.args meeting\")\n auth_code = flask.request.args.get('code')\n credentials = flow.step2_exchange(auth_code)\n flask.session['credentials'] = credentials.to_json()\n # Now I can build the service and execute the query,\n # but for the moment I'll just log it and go back to\n # the main screen\n app.logger.debug(\"Got credentials for meeting\")\n meetingID = flask.session['meetingID']\n return flask.redirect(flask.url_for('meeting', meetingID=meetingID))", "def calendar_options():\n user_id = current_identity.id\n\n if user_id:\n response = {\n \"status\": None,\n \"dateRange\" : []\n }\n \n possibleDateArr = query_month_year(user_id)\n\n if not possibleDateArr:\n return jsonify({\"status\" : \"error\"})\n\n response[\"dateRange\"] = format_dateRange(possibleDateArr)\n #Todo: Add dateArray infor to response\n response[\"status\"] = \"ok\"\n return jsonify(response)\n #TODO How handle if no user- id send to homepage but notices?", "def build_service():\n creds = None\n\n # the file token.json stores the user's access and refresh tokens, and is \n # created automatically when the authorization flow completes for the first time\n \n if os.path.exists('../creds/token.json'):\n creds = Credentials.from_authorized_user_file('../creds/token.json', SCOPES)\n\n # if there are no (valid) credentials, ask the user to login\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n '../creds/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n with open('../creds/token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('drive', 'v3', credentials=creds)\n return service", "async def api_authenticate_service(\n service_id, request: Request, code: str = Query(...), state: str = Query(...)\n):\n\n service = await get_service(service_id)\n assert service\n\n if service.state != state:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST, detail=\"State doesn't match!\"\n )\n\n redirect_uri = request.url.scheme + \"://\" + request.headers[\"Host\"]\n redirect_uri += f\"/streamalerts/api/v1/authenticate/{service_id}\"\n url, success = await authenticate_service(service_id, code, redirect_uri)\n if success:\n return RedirectResponse(url)\n else:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST, detail=\"Service already authenticated!\"\n )", "def getUsrCreds(self):\n import pickle\n import os.path\n from google_auth_oauthlib.flow import InstalledAppFlow\n from google.auth.transport.requests import Request\n\n # If modifying these scopes, delete the file token.pickle.\n SCOPES = ['https://www.googleapis.com/auth/calendar']\n\n self.creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as 
self.token:\n self.creds = pickle.load(self.token)\n # If there are no (valid) credentials available, let the user log in.\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n self.flow = InstalledAppFlow.from_client_secrets_file(\n 'client_secret.json', scopes = SCOPES)\n self.creds = self.flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as self.token:\n pickle.dump(self.creds, self.token)\n\n return self.creds", "def main(fp, exec_mode):\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n # Call the Calendar API\r\n\r\n \r\n if(exec_mode == '7-day'):\r\n today = datetime.datetime.now()\r\n d = datetime.timedelta(days=7) # want to grab on a weekly basis\r\n a = today - d\r\n start = a.isoformat() + 'Z'\r\n else: \r\n start, _ = week_magic()\r\n start = start.isoformat() + 'Z'\r\n current = datetime.datetime.now().isoformat()\r\n\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n # print('Getting the upcoming 10 events')\r\n print('Getting past events from the beginning of the week')\r\n events_result = service.events().list(calendarId='primary', timeMin=start, timeMax=now,\r\n maxResults=50, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n events_dict = {}\r\n cumulative_hours = 0\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n end = event['end'].get('dateTime', event['end'].get('date'))\r\n\r\n # compute length of each event\r\n difference = convert_hr_float(get_date_object(end) - get_date_object(start))\r\n print(\"length of event {}: {}\".format(event['summary'], difference), file=fp)\r\n\r\n cumulative_hours = cumulative_hours + difference\r\n\r\n # start filtering events by category\r\n\r\n # use regex\r\n p = re.compile(\"\\[.*?\\]\")\r\n result = p.findall(event['summary'])\r\n \r\n try:\r\n category = result[0]\r\n try:\r\n hi = result[1]\r\n try:\r\n events_dict[result[1] + ' ' + category] = events_dict[result[1] + ' ' + category] + difference\r\n except KeyError:\r\n events_dict[result[1] + ' ' + category] = 0\r\n events_dict[result[1] + ' ' + category] = events_dict[result[1] + ' ' + category] + difference\r\n except:\r\n try:\r\n events_dict[category] = events_dict[category] + difference\r\n except KeyError:\r\n events_dict[category] = 0\r\n events_dict[category] = events_dict[category] + difference\r\n\r\n except:\r\n try:\r\n events_dict[\"Misc\"] = events_dict[\"Misc\"] + difference\r\n except KeyError:\r\n events_dict[\"Misc\"] = 0\r\n events_dict[\"Misc\"] = 
events_dict[\"Misc\"] + difference\r\n\r\n # retrieve the sleep calendar\r\n # [email protected]\r\n\r\n print('Getting sleep data')\r\n\r\n # have to recalculate time\r\n if(exec_mode == '7-day'):\r\n today = datetime.datetime.now()\r\n d = datetime.timedelta(days=7) # want to grab on a weekly basis\r\n a = today - d\r\n start = a.isoformat() + 'Z'\r\n else: \r\n #TODO: calculate time elapsed since beginning of week till now, right now I only calculate 7 days\r\n start, _ = week_magic()\r\n start = start.isoformat() + 'Z'\r\n current = datetime.datetime.now().isoformat()\r\n\r\n events_result = service.events().list(calendarId='[email protected]', timeMin=start, timeMax=now,\r\n maxResults=50, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n # events_dict = {}\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n end = event['end'].get('dateTime', event['end'].get('date'))\r\n\r\n # compute length of each event\r\n difference = convert_hr_float(get_date_object(end) - get_date_object(start))\r\n print(\"length of sleep period {}: {}\".format(event['summary'], difference), file=fp)\r\n cumulative_hours = cumulative_hours + difference\r\n\r\n # if event['summary'] is 'Sleep':\r\n try:\r\n events_dict[\"Sleep\"] = events_dict[\"Sleep\"] + difference\r\n except KeyError:\r\n events_dict[\"Sleep\"] = 0\r\n events_dict[\"Sleep\"] = events_dict[\"Sleep\"] + difference\r\n\r\n # calculation to account for white space (put in misc)\r\n # if exec_mode == '7-day':\r\n white_space = (7 * 24) - cumulative_hours\r\n try:\r\n events_dict[\"Misc\"] = events_dict[\"Misc\"] + white_space\r\n except KeyError:\r\n events_dict[\"Misc\"] = 0\r\n events_dict[\"Misc\"] = events_dict[\"Misc\"] + white_space\r\n\r\n print(events_dict)\r\n\r\n display_chart(events_dict)", "def upload_get_authenticated_service(args):\r\n flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,\r\n scope=YOUTUBE_UPLOAD_SCOPE,\r\n message=MISSING_CLIENT_SECRETS_MESSAGE)\r\n\r\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\r\n credentials = storage.get()\r\n\r\n if credentials is None or credentials.invalid:\r\n credentials = run_flow(flow, storage, args)\r\n\r\n return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n http=credentials.authorize(httplib2.Http()))", "def get_authenticated_service(api_name: str, api_version: str) -> Resource:\n\n if CREDS_FILENAME.exists():\n credentials = Credentials.from_authorized_user_file(str(CREDS_FILENAME))\n # TODO make request to the access token endpoint???\n\n # FIXME verifying token\n # credentials.refresh(requests.Request())\n # print(credentials.token, credentials.expiry)\n\n # idinfo = id_token.verify_oauth2_token(\n # credentials.token, requests.Request(), credentials.client_id)\n\n # if idinfo['iss'] not in ['accounts.google.com',\n # 'https://accounts.google.com']:\n # # CREDS_FILENAME.unlink()\n # raise ValueError('Wrong issuer.')\n\n else:\n flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)\n credentials = flow.run_local_server(\n host=\"localhost\",\n port=8080,\n authorization_prompt_message=\"Please visit this URL: {url}\",\n success_message=\"The auth flow is complete; you may close this window.\",\n open_browser=True,\n )\n\n creds_data = {\n \"token\": None,\n \"refresh_token\": credentials.refresh_token,\n \"token_uri\": credentials.token_uri,\n \"client_id\": credentials.client_id,\n 
\"client_secret\": credentials.client_secret,\n \"scopes\": credentials.scopes,\n }\n\n with CREDS_FILENAME.open(\"w\") as outfile:\n json.dump(creds_data, outfile)\n\n return build(api_name, api_version, credentials=credentials)", "def choose_calendar(self):\n page_token = None\n self.calendar_list = self.service.calendarList().list(pageToken=page_token).execute()\n for calendar_list_entry in self.calendar_list['items']:\n if similar(calendar_list_entry['summary'], self.args[\"calendar_name\"]) > 0.8:\n self.chosen_calendar = calendar_list_entry['id']\n return\n raise CalendarNotFoundException(\"No calendar with the provided name was found\")", "def gconnect():\n # Confirm that client and server tokens match\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Verify that the access token is valid in general.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token={}'\n .format(access_token))\n resp = requests.get(url=url)\n result = json.loads(resp.text)\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Verify that the access token is valid for the user.\n user_id = credentials.id_token['sub']\n print('Google User ID is {}.'.format(user_id))\n print('Result from Google access token is:', '\\n', '{}.'\n .format(result))\n if result['user_id'] != user_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n print('Access token valid for the user.')\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print(\"Token's client ID does not match app's.\")\n response.headers['Content-Type'] = 'application/json'\n return response\n stored_credentials = login_session.get('credentials')\n stored_user_id = login_session.get('user_id')\n if stored_credentials is not None and user_id == stored_user_id:\n print('User is already connected.')\n response = make_response(json.dumps(\n 'User is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n print('Access token valid for this app.')\n # Store the access token in the session for later use.\n login_session['credentials'] = credentials.token_uri\n login_session['user_id'] = user_id\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n login_session['name'] = data['name']\n login_session['email'] = 
data['email']\n # Verify contents of login_session\n print('login_session object currently contains:', '\\n', '{}'\n .format(login_session))\n # Check database for user\n user = (session.query(Users)\n .filter_by(email=login_session['email'])\n .first())\n if user:\n print('{} already exists.'.format(data['email']))\n # Create new user if user does not already exist\n else:\n new_user = Users(name=login_session['name'],\n email=login_session['email'])\n session.add(new_user)\n session.commit()\n print('New user {} added to database.'.format(login_session['email']))\n output = ('<h3 class=\"font-weight-light\">Welcome, {}!</h3>'\n .format(login_session['name']))\n flash('Logged in as {}.'.format(login_session['email']))\n print('Logged in as {}.'.format(login_session['email']))\n print('Done!')\n return output", "def google_session(self):\n creds = None\n SCOPES = ['https://www.googleapis.com/auth/admin.reports.audit.readonly']\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.creds_path, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('admin', 'reports_v1', credentials=creds)\n\n return service", "def calendar_view_basic(request, owner_type, owner_id):\n\n # Like before, obtain the context for the user's request.\n context = RequestContext(request)\n\n user = request.user\n profile = get_profile(user)\n user_profile = profile[0]\n\n if request.method == 'GET':\n verified_obj = verified_calendar(context, owner_type, owner_id, user)\n if not isinstance(verified_obj, HttpResponse):\n calendar, edit_priv = verified_obj\n events = calendar.event_set.all()\n else:\n return verified_obj\n\n response_object = {'calendar' : calendar, 'events': events,\n 'edit_priv': edit_priv, 'owner_type': owner_type,\n }\n\n if owner_type == \"user\":\n\n # send school calendar\n profile_school = user_profile.getSchool()\n response_object['school'] = profile_school\n if profile_school:\n response_object['school_events'] = profile_school.cal.event_set.all()\n\n # send course calendars\n if isinstance(user_profile, Instructor):\n profile_courses = Course.objects.filter(creator=user.id)\n else:\n profile_courses = user_profile.courses.all()\n course_calendars = []\n for course in profile_courses:\n course_calendars.append({'course': course, 'events': course.cal.event_set.all()})\n response_object['course_calendars'] = course_calendars;\n return render_to_response('scheduler/calendar_basic.html',\n response_object, context)\n else:\n # No context variables to pass to the template system, hence the\n # blank dictionary object...\n return render_to_response('/login.html', {}, context)", "def handle_get_calendar(user_id, id):\n if not(get_user_global_preferences(user_id)):\n return not_found(jsonify(dict(error='User not found')))\n\n calendar = get_calendar(user_id, id)\n\n if calendar:\n\n response = dict(\n name=calendar.name,\n description=calendar.description,\n base=calendar.base,\n color=calendar.color,\n 
active=calendar.active,\n carbon=calendar.carbon,\n preferences=calendar.preferences\n )\n\n return ok(jsonify(response))\n\n return not_found(jsonify(dict(error='Calendar not found')))", "def test_calendar_view_list(self):\n response = self.client.get('/module/calendar/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'frontend/appointment/calendar/list.html')\n\n request = self.factory.get('/module/calendar/')\n request.user = self.user\n request.session = {}\n response = calendar_list(request)\n self.assertEqual(response.status_code, 200)", "def gconnect():\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps\n ('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = \\\n ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"),\n 401)\n print \"Token's client ID does not match app's.\"\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps\n ('Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n # ADD PROVIDER TO LOGIN SESSION\n login_session['provider'] = 'google'\n\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n\n output = ''\n output += '<h1>Welcome, 
'\n output += login_session['username']\n output += '!</h1>'\n output += '<img src=\"'\n output += login_session['picture']\n output += ' \" style = \"width: 300px; height: 300px;\\\n border-radius: 150px;-webkit-border-radius: \\\n 150px;-moz-border-radius: 150px;\"> '\n flash(\"you are now logged in as %s\" % login_session['username'])\n print \"done!\"\n return output", "def current_events(service, calander_id):\n event = service.events().get(calendarId='[email protected]', eventId=calander_id).execute()\n return event", "def gconnect():\n\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Obtain authorization code\n code = request.data\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = (\n \"https://www.googleapis.com/\"\n \"oauth2/v1/tokeninfo?access_token=%s\") % access_token\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'),\n 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n login_session['username'] = data['name']\n\n # Save user info into db if not exist\n save_user_if_not_exist(login_session['username'])\n\n return redirect(url_for('home'))", "def oauth2callback():\n app.logger.debug(\"Entering oauth2callback\")\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRET_FILE,\n scope= SCOPES,\n redirect_uri=flask.url_for('oauth2callback', _external=True))\n ## Note we are *not* redirecting above. 
We are noting *where*\n ## we will redirect to, which is this function. \n \n ## The *second* time we enter here, it's a callback \n ## with 'code' set in the URL parameter. If we don't\n ## see that, it must be the first time through, so we\n ## need to do step 1. \n app.logger.debug(\"Got flow\")\n if 'code' not in flask.request.args:\n app.logger.debug(\"Code not in flask.request.args\")\n auth_uri = flow.step1_get_authorize_url()\n return flask.redirect(auth_uri)\n ## This will redirect back here, but the second time through\n ## we'll have the 'code' parameter set\n else:\n ## It's the second time through ... we can tell because\n ## we got the 'code' argument in the URL.\n app.logger.debug(\"Code was in flask.request.args\")\n auth_code = flask.request.args.get('code')\n credentials = flow.step2_exchange(auth_code)\n flask.session['credentials'] = credentials.to_json()\n ## Now I can build the service and execute the query,\n ## but for the moment I'll just log it and go back to\n ## the main screen\n app.logger.debug(\"Got credentials\")\n return flask.redirect(flask.url_for('choose'))", "def oauth2callback():\n app.logger.debug(\"Entering oauth2callback\")\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRET_FILE,\n scope= SCOPES,\n redirect_uri=flask.url_for('oauth2callback', _external=True))\n ## Note we are *not* redirecting above. We are noting *where*\n ## we will redirect to, which is this function. \n \n ## The *second* time we enter here, it's a callback \n ## with 'code' set in the URL parameter. If we don't\n ## see that, it must be the first time through, so we\n ## need to do step 1. \n app.logger.debug(\"Got flow\")\n if 'code' not in flask.request.args:\n app.logger.debug(\"Code not in flask.request.args\")\n auth_uri = flow.step1_get_authorize_url()\n return flask.redirect(auth_uri)\n ## This will redirect back here, but the second time through\n ## we'll have the 'code' parameter set\n else:\n ## It's the second time through ... 
we can tell because\n ## we got the 'code' argument in the URL.\n app.logger.debug(\"Code was in flask.request.args\")\n auth_code = flask.request.args.get('code')\n credentials = flow.step2_exchange(auth_code)\n flask.session['credentials'] = credentials.to_json()\n ## Now I can build the service and execute the query,\n ## but for the moment I'll just log it and go back to\n ## the main screen\n app.logger.debug(\"Got credentials\")\n return flask.redirect(flask.url_for('choose'))", "def get_credentials():\n # normal, sane way of doing this that really shouldn't be changed\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n #if not os.path.exists(credential_dir):\n # os.makedirs(credential_dir)\n #credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n # stupid hacky way that I came up with to fix an issue with running this app as root\n credential_path = os.path.join('./credentials','calendar-python-quickstart.json') \n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def create_google_drive_service(self):\n credentials = self.get_credentials()\n http = credentials.authorize(httplib2.Http())\n return discovery.build('drive', 'v3', http=http)", "def list_namespaced_o_auth_client_authorization(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_o_auth_client_authorization\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/oauthclientauthorizations'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1OAuthClientAuthorizationList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", 
"def test_list_o_auth_client_authorization(self):\n pass", "def gconnect():\n\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = (\"https://www.googleapis.com/oauth2/\"+\\\n \"v1/tokeninfo?access_token=%s\" % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(json.dumps(\n \"Token's user ID doesn't match given user ID.\"\n ), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps(\n 'Current user is already connected.'\n ), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n # Store the access token in the session for later use.\n login_session['provider'] = 'google'\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # see if user exists, if it doesn't make a new one\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n output = ''\n output += '<div class=\"container text-center\">'+\\\n '<div class=\"row justify-content-md-center\">'+\\\n '<div class=\"col-md-8 border p-1 m-1\"><pclass=\"m-1\">Welcome, '\n output += login_session['username']\n output += '!</p>'\n output += '<div class=\"d-flex justify-content-center m-1\">'+\\\n '<img class=\"rounded mx-auto d-block\" '+\\\n 'width=\"30%\" src=\"'\n output += login_session['picture']\n output += '\"></div></div></div></div>'\n return output", "def gconnect():\r\n # Validate state token\r\n if request.args.get('state') != login_session['state']:\r\n response = 
make_response(json.dumps('Invalid state parameter.'), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n # Obtain authorization code\r\n code = request.data\r\n\r\n try:\r\n # Upgrade the authorization code into a credentials object\r\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\r\n oauth_flow.redirect_uri = 'postmessage'\r\n credentials = oauth_flow.step2_exchange(code)\r\n except FlowExchangeError:\r\n response = make_response(\r\n json.dumps('Failed to upgrade the authorization code.'), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Check that the access token is valid.\r\n access_token = credentials.access_token\r\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\r\n % access_token)\r\n h = httplib2.Http()\r\n result = json.loads(h.request(url, 'GET')[1])\r\n # If there was an error in the access token info, abort.\r\n if result.get('error') is not None:\r\n response = make_response(json.dumps(result.get('error')), 500)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Verify that the access token is used for the intended user.\r\n gplus_id = credentials.id_token['sub']\r\n if result['user_id'] != gplus_id:\r\n response = make_response(\r\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Verify that the access token is valid for this app.\r\n if result['issued_to'] != CLIENT_ID:\r\n response = make_response(\r\n json.dumps(\"Token's client ID does not match app's.\"), 401)\r\n print(\"Token's client ID does not match app's.\")\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n stored_access_token = login_session.get('access_token')\r\n stored_gplus_id = login_session.get('gplus_id')\r\n if stored_access_token is not None and gplus_id == stored_gplus_id:\r\n response = make_response(json.dumps('Current user is already connected.'),\r\n 200)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n\r\n # Store the access token in the session for later use.\r\n login_session['access_token'] = credentials.access_token\r\n login_session['gplus_id'] = gplus_id\r\n\r\n # Get user info\r\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\r\n params = {'access_token': credentials.access_token, 'alt': 'json'}\r\n answer = requests.get(userinfo_url, params=params)\r\n data = json.loads(answer.text)\r\n\r\n login_session['username'] = data['email']\r\n login_session['picture'] = data['picture']\r\n login_session['email'] = data['email']\r\n\r\n user_id = getUserID(login_session['email'])\r\n if not user_id:\r\n user_id = createUser(login_session)\r\n login_session['user_id'] = user_id\r\n\r\n output = ''\r\n output += '<h3>Welcome, '\r\n output += login_session['username']\r\n output += '!</h3>'\r\n output += '<img src=\"'\r\n output += login_session['picture']\r\n output += ' \" style = \"width: 50px; height: 50px;border-radius: 150px;\"\"'\r\n output += '\"-webkit-border-radius: 150px;-moz-border-radius: 150px;\"> '\r\n flash(\"you are now logged in as %s\" % login_session['username'])\r\n print(\"done!\")\r\n return output", "def gconnect():\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 
'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print \"Token's client ID does not match app's.\"\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_credentials = login_session.get('credentials')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_credentials is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'),\n 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # see if user exists, if it doesn't make a new one\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n\n flash(\n \"You are now logged in as %s\" % login_session['username'],\n 'alert-success')\n print \"done!\"\n return render_template(\n 'welcome.html',\n username=login_session['username'],\n picture=login_session['picture'])", "def main():\n parser = argparse.ArgumentParser(parents=[tools.argparser])\n parser.add_argument('config', type=argparse.FileType('r'), help='YAML config file')\n global args, config\n args = parser.parse_args()\n config = yaml.safe_load(args.config)\n \n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n spaceapi_delay = datetime.timedelta(seconds=config['spaceapi delay seconds'])\n fail_state = False\n \n while True:\n now_str = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n eventsResult = service.events().list(\n 
calendarId=config['calendar id'], timeMin=now_str, maxResults=10 , singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n \n # Filter full-day events\n events = [e for e in events if 'dateTime' in e['start']]\n # Filter based on summary regex\n events = [e for e in events if re.match(config['event summary filter'], e['summary'])]\n \n for e in events:\n # Parse dateTime\n e['start']['dateTime'] = dateutil.parser.parse(e['start']['dateTime'])\n e['end']['dateTime'] = dateutil.parser.parse(e['end']['dateTime'])\n \n if not events:\n print('No upcoming events found.')\n return\n else:\n print('Relevant event:')\n print(event_to_string(events[0]))\n\n now = datetime.datetime.now(pytz.utc)\n open_door_required = (events[0]['start']['dateTime'] + spaceapi_delay < now)\n \n if not open_door_required:\n fail_state = False\n print('Don\\'t care (not yet started)')\n delta = events[0]['start']['dateTime'] + spaceapi_delay - now\n sleep = min(config['refresh seconds'], max(0, delta.days*3600*24 + delta.seconds))\n print('Sleeping for {}s until {}'.format(\n sleep, events[0]['start']['dateTime'] + spaceapi_delay))\n time.sleep(sleep)\n continue\n \n open_state = requests.get(config['spaceapi url']).json()['state']['open']\n\n delta = events[0]['end']['dateTime'] - now\n sleep = min(config['poll seconds'], max(0, delta.days*3600*24 + delta.seconds))\n if not open_state:\n if not fail_state:\n print('OH NOES... we suck... (ongoing open time, but door closed)')\n fail_state = True\n mail('{}\\n\\n{}'.format(config['fail text'], event_to_string(events[0])))\n else:\n print('still failing :(')\n print('Sleeping for {}s'.format(sleep))\n time.sleep(sleep)\n continue\n else:\n if fail_state:\n print('Opened just now! :) (ongoing open time and door open)')\n fail_state = False\n mail('{}\\n\\n{}'.format(config['yay text'], event_to_string(events[0])))\n else:\n print('All good :) (ongoing open time and door open)')\n print('Sleeping for {}s'.format(sleep))\n time.sleep(sleep)\n continue", "def create_service(flags, client_id, client_secret):\n flow = OAuth2WebServerFlow(\n client_id=client_id,\n client_secret=client_secret,\n scope='https://www.googleapis.com/auth/drive.readonly',\n redirect_uri='http://localhost')\n storage = Storage('oauth_storage')\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(httplib2.Http())\n return build('drive', 'v2', http=http)", "def gconnect():\n # Validate that the state token we sent and the one we received are the same.\n if request.args.get('state') != login_session['state']:\n response = make_response(\n json.dumps({\n 'error': 'Invalid state parameter',\n 'expected': login_session['state']\n }),\n 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code(it was sent to us by google.)\n code = request.data\n try:\n # Upgrade the authorization code into a credentials object.\n # flow_from_client_secrets(client_secret file_path,scope='')\n # creates a flow object using the client's secret file.\n oauth_flow = flow_from_clientsecrets(secrets_path, scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps({'error': 'Failed to upgrade the authorization code'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Let's check that the access token is valid.\n access_token = credentials.access_token\n url = 
('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If we get an error, we abort.\n if result.get('error') is not None:\n response = make_response(json.dumps({'error': result.get('error')}), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps({'error': 'Token user IDs do not match apps'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps({'error': 'Token client IDs do not match apps'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Is the user already logged in?\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User already connected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later user.\n login_session['provider'] = 'google'\n login_session['access_token'] = credentials.access_token\n credentials = AccessTokenCredentials(login_session['access_token'], 'user-agent-value')\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer_json = requests.get(userinfo_url, params=params).json()\n\n login_session['username'] = answer_json['name']\n login_session['picture'] = answer_json['picture']\n login_session['email'] = answer_json['email']\n\n # see if user exists, otherwise create a new one.\n user_id = get_user_id(answer_json['email'])\n if not user_id:\n user_id = create_user()\n login_session['user_id'] = user_id\n\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User connected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response", "def get_service(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'client_id.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n return service", "def oauthcallback():\n code = request.args.get('code') # gets code from auth server\n if code:\n credentials = 
oauth.flow.step2_exchange(code) # method to exchange code & client secret \n # for credentials object (authenticate client)\n print 'CREDENTIALS RETURNED:'\n print credentials.get_access_token() # Returns access token and its expiration information. If the token does not exist, get one. If the token expired, refresh it.\n\n # add credentials to session\n oauth_credentials = credentials.get_access_token()\n oauth_token = oauth_credentials[0]\n oauth_expiry = datetime.datetime.now() + datetime.timedelta(seconds=oauth_credentials[1])\n print credentials\n refresh_token = credentials.refresh_token\n session['oauth_token'] = oauth_token\n session['oauth_expiry'] = oauth_expiry\n\n # gets user info from google\n first_name, last_name, email = get_user_info_from_google(oauth_token)\n\n # creates or updates user in the contacts database & redirects to account page\n create_update_user_in_db(credentials, email, first_name, last_name, oauth_token, oauth_expiry, refresh_token)\n\n # issue get request to Google Contacts API for user contacts and pipe data into contact_output.txt\n get_google_contacts(credentials) \n\n # clean data out of file and return list of contact dictionaries\n contact_list = clean_google_contact_data(email) \n\n save_user_contacts_to_db(int(session['user_id']), contact_list)\n\n return redirect('/account_home')\n\n else:\n flash(\"Something went wrong.\")\n return redirect(\"/\")", "def gconnect():\n\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps('Current user already connected.'),\n 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the 
access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['email'] = data['email']\n\n # See if a user exists, if it doesn't make a new one\n\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n\n output = ''\n output += '<h1>Welcome, '\n output += login_session['username']\n output += '!</h1>'\n return output", "def create_service():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=9797)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def gdrive_service(secrets: Dict):\n return build(\n \"drive\", \"v3\", credentials=google_credentials(secrets), cache_discovery=False\n )", "def fhir_service(request):\n\n # SERVICE = OAuth2Service(name=\"CMS BlueButton FHIR\",\n # client_id=CLIENT_ID,\n # client_secret=CLIENT_SECRET,\n # access_token_url=TOKEN_URL,\n # authorize_url=AUTH_URL,\n # base_url=settings.OAUTH_TEST_INFO['BASE']\n # )\n\n # service = OAuth2Service(\n # name=\"CMS BlueButton FHIR\",\n # client_id=CLIENT_ID,\n # client_secret=CLIENT_SECRET,\n # access_token_url=TOKEN_URL,\n # authorize_url=AUTH_URL,\n # base_url=settings.OAUTH_TEST_INFO['BASE']\n # )\n\n # code = uuid4()\n # raw = service.get_raw_access_token()\n # if settings.DEBUG:\n # print(\"Raw:\", raw)\n state = create_state()\n\n params = {'client_id': CLIENT_ID,\n 'redirect_uri': REDIRECT_URI,\n 'state': state,\n 'response_type': 'code',\n }\n\n url = SERVICE.get_authorize_url(**params)\n\n if settings.DEBUG:\n print(\"Authorization URL:\", url)\n\n return HttpResponseRedirect(url)", "def verified_calendar(context, owner_type, owner_id, user):\n\n if (owner_type == 'user'):\n if (user.id == int(owner_id)):\n calendar = UserProfile.objects.get(user=user).cal\n edit_priv = True\n else:\n #return HttpResponse('Sorry, this is not your own profile!')\n return render_permission_denied(context, 'access this user\\'s calendar')\n elif (owner_type == 'school'):\n profile = UserProfile.objects.get(user=user)\n if (profile.school.id == int(owner_id)):\n calendar = profile.school.cal\n edit_priv = profile.school.admin.id == user.id\n else:\n #return HttpResponse('Sorry, this is not your school!')\n return render_permission_denied(context, 'access this school\\'s calendar')\n elif (owner_type == 'course'):\n profile = UserProfile.objects.get(user=user)\n course = profile.courses.filter(id=int(owner_id))[:1]\n # If the user is enrolled in a course and the school\n if course and course[0].school.id 
== profile.school.id:\n calendar = course[0].cal\n\n #If student\n if (Student.objects.filter(user=user)):\n edit_priv = False\n if (course[0].student_admins.filter(id=int(profile.id))):\n edit_priv = True\n else:\n course = Course.objects.filter(id=int(owner_id))[:1]\n #If teacher\n if course and course[0].creator.id == profile.user.id:\n edit_priv = True\n calendar = course[0].cal\n else:\n return render_permission_denied(context, ' access this course\\'s calendar')\n return (calendar, edit_priv)", "def get_drive_services(username):\n g_creds = generate_google_token_from_db(user=username) # google drive creds\n d_creds = generate_dropbox_token_from_db(user=username) # dropbox creds\n # building google drive service\n g_service = build('drive', 'v3', credentials=g_creds)\n # building dropbox service \n d_service = dropbox.Dropbox(app_key=dropbox_app_key, \n oauth2_access_token=d_creds['oauth2_access_token'],\n oauth2_refresh_token=d_creds['oauth2_refresh_token'], \n app_secret=dropbox_app_secret) \n d_service.check_and_refresh_access_token()\n return g_service, d_service", "def get_session(cred_file=\"mystic_creds.json\"):\n scope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(cred_file,\n scope)\n #print(credentials)\n gc = gspread.authorize(credentials)\n return gc", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 20 events')\n try:\n eventsResult = service.events().list(\n calendarId='[email protected]', timeMin=now, maxResults=20, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n if not events:\n print('No upcoming events found.')\n text_file = open(\"scheduledActions.txt\", \"wb\") #May want to use a check on the msg type to only overwrite calendar tasks\n # text_file.write(bytes('Updated '+now[:-8]+'\\n','UTF-8'))\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n start = start[:22] + start[-2:] #Trims the last colon\n start = datetime.datetime.strptime(start,'%Y-%m-%dT%H:%M:%S%z')\n start = int(time.mktime(start.timetuple()))\n end = event['end'].get('dateTime', event['end'].get('date'))\n end = end[:22] + end[-2:] #Trims the last colon\n end = datetime.datetime.strptime(end,'%Y-%m-%dT%H:%M:%S%z')\n end = int(time.mktime(end.timetuple()))\n description = event['description']\n if description.count(',')==5:\n desc1=description.split(\",\")[0] + \",\" + description.split(\",\")[1] + \",\" + description.split(\",\")[2]\n print(start,desc1)\n writeString=str(start)+','+desc1+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n desc2=description.split(\",\")[3] + \",\" + description.split(\",\")[4] + \",\" + description.split(\",\")[5]\n print(end,desc2)\n writeString=str(end)+','+desc2+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n else:\n print(start, description) #event['summary'] event['location']\n writeString=str(start)+','+description+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n text_file.close()\n print('Calendar read complete.')\n except httplib2.ServerNotFoundError:\n print(\"!---- Looks like there's no internet connection just now. 
Wait till tomorrow.\")", "def get_edit_google_service_account(cls, handler):\n\n if roles.Roles.is_super_admin():\n # ?tab= for v1.9, ?action= for v1.8\n exit_url = '%s?tab=google_service_account' % handler.LINK_URL\n else:\n exit_url = cls.request.referer\n key = handler.request.get('key')\n namespace = handler.request.get('namespace')\n credential_type = handler.request.get('credential_type')\n\n extra_args = {\n 'namespace': namespace,\n 'credential_type': credential_type,\n 'xsrf_token': cgi.escape(\n handler.create_xsrf_token('edit-service_account'))\n }\n delete_url = rest_url = GoogleServiceAccountRESTHandler.URI\n\n template_values = {}\n template_values['page_title'] = handler.format_title(\n 'Add Google Service Account')\n template_values['main_content'] = oeditor.ObjectEditor.get_html_for(\n cls, GoogleServiceAccountRESTHandler.SCHEMA_JSON,\n GoogleServiceAccountRESTHandler.ANNOTATIONS_DICT,\n key, rest_url, exit_url,\n extra_args=extra_args,\n delete_url=delete_url, delete_method='delete',\n save_button_caption='Save')\n if not template_values['main_content']:\n logging.error('Main content could not be loaded')\n handler.render_page(template_values)", "def getAPIservice(args, name, version, client_secrets_file, scope=None, parents=[], discovery_filename=None):\n if scope is None:\n scope = 'https://www.googleapis.com/auth/' + name\n\n # Parser command-line arguments.\n parent_parsers = [tools.argparser]\n parent_parsers.extend(parents)\n parser = argparse.ArgumentParser(\n description=\"Google API v3 Service Provider\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(args)\n print(\"args = %s\" % (args))\n\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the API Access tab on the Google APIs\n # Console <http://code.google.com/apis/console>.\n # client_secrets = os.path.join(os.path.dirname(filename),\n # 'client_secrets.json')\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(client_secrets_file,\n scope=scope,\n message=tools.message_if_missing(client_secrets_file))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. 
The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http = httplib2.Http())\n\n if discovery_filename is None:\n # Construct a service object via the discovery service.\n service = discovery.build(name, version, http=http)\n else:\n # Construct a service object using a local discovery document file.\n with open(discovery_filename) as discovery_file:\n service = discovery.build_from_document(\n discovery_file.read(),\n base='https://www.googleapis.com/',\n http=http)\n return (service, flags)", "def __init__(self, credentials):\n http = credentials.authorize(httplib2.Http())\n self.service = googleapiclient.discovery.build(\"drive\", \"v2\", http=http)", "def get_service():\r\n creds = None\r\n # The file token.json stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('/var/jail/home/team28/final_project/python/EmailApp/token.json'):\r\n creds = Credentials.from_authorized_user_file('/var/jail/home/team28/final_project/python/EmailApp/token.json', SCOPES)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file('/var/jail/home/team28/final_project/python/EmailApp/credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('/var/jail/home/team28/final_project/python/EmailApp/token.json', 'w') as token:\r\n token.write(creds.to_json())\r\n\r\n service = build('gmail', 'v1', credentials=creds)\r\n return service", "def authenticate():\n with open(APP_KEYS_FILE) as f:\n app_keys = json.load(f)\n storage = Storage(USER_OAUTH_DATA_FILE)\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(\n OAuth2WebServerFlow(\n client_id=app_keys['APP_CLIENT_ID'],\n client_secret=app_keys['APP_CLIENT_SECRET'],\n scope=['https://www.googleapis.com/auth/reminders'],\n user_agent='google reminders cli tool'),\n storage,\n )\n auth_http = credentials.authorize(httplib2.Http())\n return auth_http", "def calendar_choices(self):\n if not self._calendars:\n if self.authenticated:\n default = self.account.schedule().get_default_calendar()\n # {\n # \"default\" : <DEFAULT_CALENDAR>,\n # \"<CALENDAR_NAME>: <CALENDAR>,\n # ...\n # }\n self._calendars = {\n DEFAULT_CALENDAR: default,\n **{\n c.name: c\n for c in self.account.schedule().list_calendars() if c.name != default.name\n }\n }\n\n return self._calendars", "def get_Analytics_service():\n #reference: https://developers.google.com/analytics/devguides/reporting/core/v4/\n credentials = get_credentials()\n \n http = httplib2.Http()\n http = credentials.authorize(http)\n service = apiclient.discovery.build('analytics', 'v4', http=http)\n print \"Got Analytics service\"\n\n return service", "def fusion_api_get_service_access(self, host=None, api=None, headers=None):\n return self.service_access.get(host, api, headers)", "async def test_calendars_http_api(hass, hass_client):\n await async_setup_component(hass, \"calendar\", {\"calendar\": {\"platform\": \"demo\"}})\n await 
hass.async_block_till_done()\n client = await hass_client()\n response = await client.get(\"/api/calendars\")\n assert response.status == 200\n data = await response.json()\n assert data == [\n {\"entity_id\": \"calendar.calendar_1\", \"name\": \"Calendar 1\"},\n {\"entity_id\": \"calendar.calendar_2\", \"name\": \"Calendar 2\"},\n ]", "def auth(scope='https://mail.google.com', file_name='credentials.json', svc='gmail', version='v1'):\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(fn, scope)\n creds = tools.run_flow(flow, store)\n service = build(svc, version, http=creds.authorize(Http()))\n return service", "def _make_service_request(self, service_name=None, operation=None, id_param=None):\n if not service_name:\n if self.develop_mode:\n # Return a list of available services\n result = dict(available_services=get_service_registry().services.keys())\n return result\n else:\n raise BadRequest(\"Service name missing\")\n service_name = str(service_name)\n\n if not operation:\n if self.develop_mode:\n # Return a list of available operations\n result = dict(available_operations=[])\n return result\n else:\n raise BadRequest(\"Service operation missing\")\n operation = str(operation)\n\n # Apply service white list and black list for initial protection and get service client\n service_def = self.get_secure_service_def(service_name)\n target_client = service_def.client\n\n # Get service request arguments and operation parameter values request\n req_args = self._get_request_args()\n\n param_list = self.create_parameter_list(service_def, operation, req_args, id_param)\n\n # Validate requesting user and expiry and add governance headers\n ion_actor_id, expiry = self.get_governance_info_from_request(req_args)\n in_login_whitelist = self.in_login_whitelist(\"request\", service_name, operation)\n ion_actor_id, expiry = self.validate_request(ion_actor_id, expiry, in_whitelist=in_login_whitelist)\n param_list[\"headers\"] = self.build_message_headers(ion_actor_id, expiry)\n\n # Make service operation call\n client = target_client(process=self.process)\n method_call = getattr(client, operation)\n result = method_call(**param_list)\n\n return result", "def get_credentials():\n credentials_path = os.path.join(CREDENTIALS_DIR, CREDENTIALS_FILE)\n store = oauth2client.file.Storage(credentials_path)\n credentials = store.locked_get()\n\n if not credentials or credentials.invalid:\n client_secret_path = os.path.join(CREDENTIAL_DIR, CLIENT_SECRET_FILE)\n flow = client.flow_from_clientsecrets(client_secret_path, \n scope='https://www.googleapis.com/auth/admin.directory.resource.calendar',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n\n print(\"Storing credentials to: \" + credentials_path)\n\n\n return credentials", "def awa_provide_authorization_result(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Awa/AwaProvideAuthorizationResult/\"))", "def Access(self):\n if datetime.now() < self.access_exp:\n pass\n elif datetime.now() > self.access_exp and datetime.now() < self.refresh_exp:\n grant = 'refresh_token'\n self._postRequest(grant=grant)\n elif datetime.now() > self.refresh_exp:\n grant = 'authorization_code'\n self._getURLcode()\n 
self._postRequest(grant=grant)", "def test_auth_required(self):\n\n res = self.client.get(SERVICES_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def initial(self, request, *args, **kwargs):\n super(OdooApi, self).initial(request, *args, **kwargs)\n self.check_service_permission(request, kwargs.get('service_path'))" ]
[ "0.6547268", "0.64876544", "0.62233627", "0.61843467", "0.614813", "0.6039895", "0.5898657", "0.5767838", "0.5764027", "0.5762848", "0.5696502", "0.5657998", "0.5613315", "0.5523032", "0.54875094", "0.5481039", "0.5432056", "0.5416046", "0.540904", "0.5369274", "0.5323026", "0.5320476", "0.5304401", "0.52694136", "0.52614355", "0.525763", "0.5252105", "0.52347267", "0.5226494", "0.5224404", "0.5223857", "0.5213145", "0.5203104", "0.5185525", "0.5183722", "0.51698947", "0.5164748", "0.5149039", "0.5139086", "0.5138385", "0.5122463", "0.5116694", "0.50975484", "0.5091018", "0.50905603", "0.5088546", "0.5080669", "0.5077801", "0.50724804", "0.50687134", "0.5049731", "0.50490063", "0.5043302", "0.5039926", "0.5037403", "0.503005", "0.5018343", "0.500864", "0.5007863", "0.50022256", "0.49971667", "0.4993563", "0.4993563", "0.49897197", "0.49845448", "0.49735114", "0.49629635", "0.49603617", "0.49599117", "0.495802", "0.4956842", "0.494998", "0.49491486", "0.4947781", "0.4945791", "0.49437845", "0.4937517", "0.49329472", "0.49110413", "0.4882841", "0.48753783", "0.48627815", "0.48620653", "0.48535174", "0.48534942", "0.48498884", "0.48471436", "0.48428205", "0.48411363", "0.48340148", "0.48255113", "0.48172018", "0.48124808", "0.48118573", "0.48015752", "0.47984907", "0.47961926", "0.47943643", "0.478842" ]
0.631776
3
The 'flow' has this one place to call back to. We'll enter here more than once as steps in the flow are completed, and need to keep track of how far we've gotten. The first time we'll do the first step, the second time we'll skip the first step and do the second, and so on.
def oauth2callback(): app.logger.debug("Entering oauth2callback") flow = client.flow_from_clientsecrets( CLIENT_SECRET_FILE, scope= SCOPES, redirect_uri=flask.url_for('oauth2callback', _external=True)) ## Note we are *not* redirecting above. We are noting *where* ## we will redirect to, which is this function. ## The *second* time we enter here, it's a callback ## with 'code' set in the URL parameter. If we don't ## see that, it must be the first time through, so we ## need to do step 1. app.logger.debug("Got flow") if 'code' not in flask.request.args: app.logger.debug("Code not in flask.request.args") auth_uri = flow.step1_get_authorize_url() return flask.redirect(auth_uri) ## This will redirect back here, but the second time through ## we'll have the 'code' parameter set else: ## It's the second time through ... we can tell because ## we got the 'code' argument in the URL. app.logger.debug("Code was in flask.request.args") auth_code = flask.request.args.get('code') credentials = flow.step2_exchange(auth_code) flask.session['credentials'] = credentials.to_json() ## Now I can build the service and execute the query, ## but for the moment I'll just log it and go back to ## the main screen app.logger.debug("Got credentials") return flask.redirect(flask.url_for('choose'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_step(self) -> None:", "def next_step(self):\n self.proceed()\n self.execute_current()", "def _step(self) -> None:", "def step_forward(self):", "def _step(self):\n pass", "def step(self):\n\n pass", "def _step(self, whence):\n pass", "def getCurrentStep():", "def step(self):\n while self.state != STATE_TERMINAL:\n self.step_strategies[self.state]()", "def proceed(self):\n if self.current_step is None or self.step_position == StepPosition.Before:\n return\n\n for condition, transition in self.current_step.conditions:\n if condition.satisfied():\n new_proc = transition.procedure\n self.current_procedure_id = new_proc\n self.current_step = self._suite[new_proc].steps[transition.step]\n self.step_position = StepPosition.Before\n break", "def step(self):\r\n raise NotImplementedError", "def step(self):\n raise NotImplementedError", "def step(self):\n #1. Time progresses\n self.time_operator.step()\n \n #2. Form and dissolve relationships\"\n self.relationship_operator.step()\n\n #3. HIV transmission\n self.infection_operator.step()", "def step(self, state):", "def perform_step(self) -> None:\n pass", "def step_forward(self):\n if self.state_num < len(self.steps):\n print(\"\\nStepping forward to state %d.\" % int(self.state_num + 1))\n self.state_string[0] = \"Stepping forward to state \" + str(self.state_num + 1) + \".\"\n # Get process and resource involved.\n process = self.steps[self.state_num][0]\n resource = self.steps[self.state_num][2]\n # Is this a request?\n if self.steps[self.state_num][1]:\n print(\"Process %d requests resource %d.\" % (process, resource))\n self.state_string[1] = \"Process \" + str(process) + \" requests resource \" + str(resource) + \".\"\n # Is the resource not being used by a process?\n if self.available[resource] > 0:\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][process] += 1\n # Make resource unavailabe.\n self.available[resource] -= 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = process\n else:\n # Mark in request matrix the relationship between resource and process.\n self.request_edges[resource][process] += 1\n # Add our process to the graph and make a directed edge.\n if process not in self.graph:\n self.graph.add_vertex(process)\n if self.connected_v[resource] not in self.graph:\n self.graph.add_vertex(self.connected_v[resource])\n if not self.graph.does_edge_exist(process, self.connected_v[resource]):\n self.graph.add_edge(process, self.connected_v[resource])\n print(\"p{:d} --> p{:d}\".format(process, self.connected_v[resource]))\n else:\n print(\"Process %d releases resource %d.\" % (process, resource))\n self.state_string[0] = \"Process \" + str(process) + \" releases resource \" + str(resource) + \".\"\n # Remove connection in hold matrix.\n self.hold_edges[resource][process] -= 1\n # Does another process want this resource?\n if np.count_nonzero(self.request_edges[resource]) > 0:\n # Get next process that wants the resource.\n new_process = self.request_edges[resource].index(1)\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][new_process] += 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = new_process\n # Remove connection in request matrix.\n self.request_edges[resource][new_process] -= 1\n # Delete edge if it exists.\n if self.graph.does_edge_exist(new_process, self.connected_v[resource]):\n self.graph.delete_edge(new_process, self.connected_v[resource])\n 
print(\"Process %d now has resource %d.\" % (new_process, resource))\n self.state_string[1] = \"Process \" + str(new_process) + \" now has resource \" + str(resource) + \".\"\n else:\n print(\"Resource %d is now available.\" % resource)\n self.state_string[1] = \"Resource \" + str(resource) + \" is now available.\"\n # Mark resource as unowned by a process.\n self.available[resource] += 1\n # Empty process that owned the resource previously.\n self.connected_v[resource] = None\n # Advance the state.\n self.state_num += 1", "def getSteps():", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def next_step(self):\n if self.time_point + 1 >= len(self.data):\n print(\"Error: at last time point\")\n else:\n self.time_point = self.time_point + 1\n self.load_frame()", "def step(self, **kwargs):\n pass", "def step(self, action):", "def step(self, step=None):\n pass", "def step(self):\n return self._step", "def step ( self ) :\n return self.__step", "def step(self):\n self.latent.step()", "def hdlStep(self):\n if not self.current == self.cfg[\"GOAL\"] and self.timeremaining:\n\n try:\n if self.timeout < float('inf'):\n with Timeout(self.timeout): # call under SIGNAL\n nextreturn = self.gen.next()\n else:\n nextreturn = self.gen.next() # call with no SIGNAL\n except Timeout.Timeout:\n if self.timeremaining < 0:\n self.timeremaining = 0\n self.updateStatus(\"Timeout!\", False)\n else:\n self.updateStatus(\"No path found\", False)\n self.hdlStop()\n except:\n self.updateStatus(\"Agent returned \" + str(nextreturn), False)\n self.hdlStop()\n else: # try/except/else...\n # does nextreturn include a list of coordinates to draw?\n if isinstance(nextreturn[1], (list, tuple)):\n nextstep, coordsets = nextreturn\n for coordset in coordsets:\n if coordset[1] == 'reset':\n self.gui.vmap.clear(coordset[0], self.lmap)\n else:\n self.gui.vmap.drawSet(coordset[0], coordset[1])\n self.gui.setStart(self.cfg[\"START\"])\n self.gui.setGoal(self.cfg[\"GOAL\"])\n self.fullsearchflag = True\n self.coordsets = coordsets\n self.updateStatus(\"Plotting path...\", False)\n else:\n # nextreturn just includes the next coordinate, no drawing data\n nextstep = nextreturn\n\n # Paint path\n self.gui.vmap.drawSet(self.path, \"blue\")\n self.gui.vmap.drawPoint(nextstep, \"white\")\n self.current = nextstep\n self.path.add(nextstep)\n if isinstance((self.pathcost), int):\n currcost = str(self.pathcost)\n else:\n currcost = '{0:.2f}'.format(self.pathcost)\n message = str(nextstep) + \" | Cost : \" + currcost + \\\n \" | Steps : \" + str(self.pathsteps)\n if self.cfg.get(\"DEADLINE\"):\n message += \" | Time remaining: \" + \\\n str(self.timeremaining)\n self.updateStatus(message)\n sleep(self.cfg.get(\"SPEED\")) # delay, if any", "def stepGenerator(self, current, target):\n\n while True:\n target = self.cfg[\"GOAL\"]\n if self.gotscript:\n if self.pathsteps in self.tc:\n terrain, topleft, botright = self.tc.get(self.pathsteps)\n pointlist = p4.getBlock(topleft, botright)\n # change logical map\n self.lmap.setPoints(terrain, pointlist)\n # change in gui, if running\n try:\n self.gui.clearPoints(pointlist)\n except:\n pass\n if self.pathsteps in self.gc:\n target = self.lmap.nearestPassable(self.gc.get(self.pathsteps))\n self.setGoal(target)\n if self.pathsteps in self.ac:\n newpos = p4.addVectors(current, self.ac.get(self.pathsteps))\n current = self.lmap.nearestPassable(newpos)\n yield newpos # scripted move is not costed or counted\n 
try:\n clockstart = timer() # start timer\n nextreturn = self.agent.getNext(self.lmap, current, target, self.timeremaining)\n logging.debug(nextreturn)\n clockend = timer()\n except:\n raise p4.BadAgentException()\n\n # Only time first step unless operating in 'realtime' mode. If this is realtime, and the step involved no reasoning (took less than FREE_TIME) do not count its time\n if ((not self.cfg.get(\"REALTIME\") and self.pathtime) or (\n (clockend - clockstart) < self.cfg.get(\"FREE_TIME\"))):\n steptime = 0\n else:\n steptime = (clockend - clockstart)\n previous = current\n\n # Agent may have returned single step or step plus sets of coords and colors.\n # Try/except distinguishes between them\n try:\n x = nextreturn[1][0] # fails if nextreturn is coord only\n current, configsets = nextreturn\n except TypeError:\n current = nextreturn\n finally:\n self.pathsteps += 1\n self.pathtime += steptime\n self.timeremaining -= steptime\n\n # We now consider every door open. In fact, we are just computing the final path cost, we are not\n # searching for it. So is reasonable to assume that I have all the keys along the path.\n allkeys = [k for k in self.lmap.key_and_doors.keys()]\n cost = self.lmap.getCost(current, previous, allkeys)\n # self.pathcost += self.lmap.getCost(current, previous, allkeys)\n if not self.lmap.isAdjacent(current, previous):\n cost = float('inf')\n # agent has made illegal move:\n if cost == float('inf'):\n self.updateStatus(\"Illegal move at \" + str(current) + \":\" + str(self.lmap.getCost(current)), False)\n if self.cfg[\"STRICT\"]:\n current = previous\n nextreturn = previous\n self.pathsteps -= 1\n cost = 0\n self.pathcost += cost\n yield nextreturn", "def execute(self, flow: Flow):\n while True:\n autosteps = flow.next_autosteps()\n steps = flow.next_steps()\n\n if not steps:\n log.debug(\"Flow ended correctly.Nothing left to do.\")\n with self._lock:\n self.in_flight.remove(flow)\n break\n\n if not autosteps and flow.current_step.hints:\n possible_next_steps = [f'You are in the flow **{flow.name}**, you can continue with:\\n\\n']\n for step in steps:\n cmd = step.command\n cmd_fnc = self._bot.all_commands[cmd]\n reg_cmd = cmd_fnc._err_re_command\n syntax_args = cmd_fnc._err_command_syntax\n reg_prefixed = cmd_fnc._err_command_prefix_required if reg_cmd else True\n syntax = self._bot.prefix if reg_prefixed else ''\n if not reg_cmd:\n syntax += cmd.replace('_', ' ')\n if syntax_args:\n syntax += syntax_args\n possible_next_steps.append(f'- {syntax}')\n self._bot.send(flow.requestor, '\\n'.join(possible_next_steps))\n break\n\n log.debug('Steps triggered automatically %s.', ', '.join(str(node) for node in autosteps))\n log.debug('All possible next steps: %s.', ', '.join(str(node) for node in steps))\n\n for autostep in autosteps:\n log.debug(\"Proceeding automatically with step %s\", autostep)\n if autostep == FLOW_END:\n log.debug('This flow ENDED.')\n with self._lock:\n self.in_flight.remove(flow)\n return\n try:\n msg = Message(frm=flow.requestor, flow=flow)\n result = self._bot.commands[autostep.command](msg, None)\n log.debug('Step result %s: %s', flow.requestor, result)\n\n except Exception as e:\n log.exception('%s errored at %s', flow, autostep)\n self._bot.send(flow.requestor, f'{flow} errored at {autostep} with \"{e}\"')\n flow.advance(autostep) # TODO: this is only true for a single step, make it forkable.\n log.debug('Flow execution suspended/ended normally.')", "def go_to_next_state(self):\n pass", "def next_step(self):\n logging.debug(u\"Moving to next 
step\")\n\n if not self.steps or len(self.steps) < 1:\n logging.debug(u\"- no steps have ben set\")\n return None\n\n index = self.get('_index')\n\n if index is None:\n index = 0\n elif index < len(self.steps)-1:\n index += 1\n else:\n logging.debug(u\"- all steps have ben consumed\")\n return None\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- triggering step #{}\".format(index+1))\n self.set('_index', index)\n step = self.steps[index]\n step.trigger(bot=self.bot)\n return step", "def step(self):\n # Pull data from the first available input channel.\n\n input_bag = self.get()\n\n # todo add timer\n self.handle_results(input_bag, input_bag.apply(self._stack))", "def train_loop_post(self, current_step):\r\n pass", "def run_one_step(self):\n pass", "def next_step(self, p1, p2):\n\n self.index += 1\n out('Protocol step' + str(self.index))\n\n if self.index == 1:\n return prepare(p1, p2)\n\n elif self.index == 2:\n return share_graphs(p1, p2)\n\n elif self.index == 3:\n if turn(p1, p2):\n # The game is over\n\n return False\n else:\n # It's a draw. The game needs a new turn\n self.index -= 1\n return False\n\n elif self.index == 4:\n if play_again(p1, p2):\n self.index = 2\n return False\n else:\n return True", "def step(self):\n self.function()", "def step(self, move):", "def step(self, actions):\n self.step_async(actions)\n return self.step_wait()", "def step(self, actions):\n self.step_async(actions)\n return self.step_wait()", "def _run_next_state(self):\n if self.state != \"STOP\":\n self.state = self.get_state_info(\"next\")\n self._run_state()", "def train_loop_pre(self, current_step):\r\n pass", "def step(self, action):\n pass", "def step(self, action):\n pass", "def step(self):\n self.driver.step()", "async def on_step(self, iteration: int):\n raise NotImplementedError", "def runStep(self):\n if self.done:\n pass\n elif self.frame_num < self.num_iters:\n start, end = self.t, self.t + 1\n frame = self.data[start:end, :]\n t = time.time()\n id = self.client.put([self.t, frame], \"acq_bubble\" + str(self.frame_num))\n self.timestamp.append([time.time(), self.frame_num])\n try:\n self.q_out.put([str(self.frame_num), id])\n self.frame_num += 1\n self.t += self.l\n # also log to disk #TODO: spawn separate process here?\n except Exception as e:\n logger.error(\"Acquirer general exception: {}\".format(e))\n logger.error(traceback.format_exc())\n\n\n time.sleep(1/self.framerate) # pretend framerate\n self.total_times.append(time.time() - t)\n\n else: # simulating a done signal from the source\n logger.error(\"Done with all available frames: {0}\".format(self.frame_num))\n self.data = None\n self.q_comm.put(None)\n self.done = True # stay awake in case we get e.g. 
a shutdown signal", "def step(self, actions):\n assert (len(actions) == self.num_actions)\n actions = np.around(actions)\n actions = np.clip(actions, 0, 1)\n self.done = self.network.perform_actions(actions)\n self.cur_pos = self._get_current_pos_in_1d()\n self.reward = self.network.get_reward()\n\n return self.cur_pos, self.reward, self.done, {}", "def Advance():\n warp.step()", "def next_run(self):\n self.load_run(run=self.run+1)", "def step(\n self,\n actions,\n ) -> Tuple[\"next_state\", \"reward\", \"done\", \"env_info\"]:\n env_info = self.env.step(actions)[self.brain_name]\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n return (next_states, rewards, dones, env_info)", "def after_step():\n raise NotImplementedError", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def step_async(self, actions):", "def get_step():\n\n # Decide which direction to go and how far to go in that direction.\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4, 5, 6, 7, 8])\n step = direction * distance\n\n # Reject moves that go nowhere.\n if step == 0:\n get_step()\n else:\n return step", "def step(self):\n curr_step = self.steplist[self.curr_step_idx] #dictionary\n insn_type = curr_step[\"type\"]\n\n #case over insn_types\n if insn_type == \"init\":\n self.exec_init(curr_step)\n self.curr_step_idx += 1\n\n elif insn_type == \"preprocess\":\n self.exec_preprocess(curr_step)\n self.curr_step_idx += 1\n\n elif insn_type == \"augment\":\n self.exec_augment(curr_step)\n self.curr_step_idx += 1\n \n elif insn_type ==\"combine\":\n self.exec_combine(curr_step)\n self.curr_step_idx += 1\n\n elif insn_type ==\"attention\":\n self.exec_attention(curr_step)\n self.curr_step_idx += 1", "def proceed(self):\n pass", "def flowingFrom(self, fount):", "def debuggerstep(self, action):\n\n\n\n #Calculate actual Next State you are supposed to reach via action\n if action == 0:\n nxtState = (self.state[0] - 1, self.state[1])\n elif action == 1:\n nxtState = (self.state[0], self.state[1] - 1)\n elif action == 2:\n nxtState = (self.state[0], self.state[1] + 1) \n elif action == 3: \n nxtState = (self.state[0] + 1, self.state[1])\n\n \n #BUT YOU CAN ONLY REACH THERE WITH 80% PROBABIBILITY\n #Stocasticity Implementation\n correctMove = random.random() \n \n #Check if nextState to reach is valid, Redundant Check (Might have to remove in future iterations)\n if self.isValid(nxtState): \n #If you have a valid next state, you reach there with 80% probability\n if correctMove <= 0.8: \n \n print(\"Ended up in correct state taking action \", end = \"\")\n if (action == 0): \n print(\"Up\")\n elif (action == 1): \n print(\"Left\")\n elif (action == 2): \n print(\"Right\")\n elif(action == 3): \n print(\"Down\")\n self.state = nxtState\n self.isEnd()\n return nxtState, self.giveReward(), self.isTerminal\n \n #Else you didn't end up in right place\n else: \n \n print(\"Ended up in wrong state. 
Had to go \", end = \"\")\n if (action == 0): \n print(\"Up \", end = \"\")\n elif (action == 1): \n print(\"Left \", end = \"\")\n elif (action == 2): \n print(\"Right \", end = \"\")\n elif(action == 3): \n print(\"Down \", end = \"\")\n \n print(\"And end up in \", end = \"\")\n print(nxtState)\n \n print(\"But ended up in: \", end = \"\")\n \n \n #Find remaining states that can be possibly reached: \n altActions =[]\n \n if action == 0:\n altActions.append(1)\n altActions.append(2)\n elif action == 1: \n altActions.append(0)\n altActions.append(3)\n elif action == 2: \n altActions.append(0)\n altActions.append(3)\n else: \n altActions.append(1)\n altActions.append(2)\n \n \n #Pick one random of all possible next states\n altAction = random.choice(altActions)\n #Check if alternate possibility is valid \n if altAction == 0:\n nxtState1 = (self.state[0] - 1, self.state[1])\n elif altAction == 1:\n nxtState1 = (self.state[0], self.state[1] - 1)\n elif altAction == 2:\n nxtState1 = (self.state[0], self.state[1] + 1) \n elif altAction == 3: \n nxtState1 = (self.state[0] + 1, self.state[1])\n \n \n \n #If alternate possibility is valid, update values\n if self.isValid(nxtState1): \n print(nxtState1)\n \n #Update Values \n self.state = nxtState1\n self.isEnd()\n return nxtState1, self.giveReward(), self.isTerminal\n \n #If alternate possibility is not valid, then you stay in place\n else: \n #Stay in place \n print(self.state)\n print(\"Stayed in Place!\")\n self.isEnd()\n return self.state, self.giveReward(), self.isTerminal\n else: \n #Stay in place \n print(self.state)\n print(\"Invalid action picked, Stayed in Place!\")\n self.isEnd()\n return self.state, self.giveReward(), self.isTerminal", "def step(self, action, state):\r\n done = False\r\n next_state = json.loads(state)\r\n state_shift = self.action_map[action]\r\n\r\n next_state = np.array(next_state) + np.array(state_shift)\r\n next_state = next_state.tolist()\r\n\r\n reward = self.reward[next_state[0], next_state[1]]\r\n self.reward[next_state[0], next_state[1]] = 0\r\n\r\n next_state = json.dumps(next_state)\r\n\r\n if reward < 0 or reward == 4:\r\n done = True\r\n\r\n return next_state, reward, done", "def step(self):\n # Take action\n # self._take_action(action)\n # moved outside\n\n # take timestep\n total_finished_jobs = 0\n for node in self.nodes:\n finished_jobs = node.step()\n total_finished_jobs += len(finished_jobs)\n self.cumulative_wait_time += len(self.queue)\n\n # Calculate reward\n reward = total_finished_jobs\n \n # Get new arrivals\n new_jobs = self.get_job_arrivals()\n for job in new_jobs:\n self.queue.append(job)\n\n #increment priority\n for job in self.queue:\n job.priority += 1\n\n # Game over when queue is empty\n if len(self.queue) == 0:\n game_over = True\n else:\n game_over = False\n\n # Just return reward for now, because our system is fully transparent\n # i.e., the Agent has perfect knowledge of environment state.\n # return ob, reward, episode_over, debug_info\n return None, reward, game_over, {}", "def step(self):\n self.step_n += 1\n self.step_t += 1\n # TODO: directly calling agent.act will by-pass BaseDeepAgent, which\n # checks and assigns 'sess' arugment. So we manually set sess here. 
But\n # is there a better way to do this?\n self.action = self.agent.act(\n state=self.state, sess=self.agent.sess\n )\n next_state, vec_reward, done, _ = self.env.step(self.action)\n reward, done = func_compile_exp_agent(self.action, vec_reward, done)\n self.total_reward = reward + self.reward_decay * self.total_reward\n info = self.agent.step(\n state=self.state, action=self.action, reward=reward,\n next_state=next_state, episode_done=done\n )\n self.record(info)\n flag_success = True if done and reward > 0.0 else False\n if self.savedir is not None:\n self.steps_saver.save(self.episode_n, self.step_t, self.state, self.action,\n vec_reward, reward, done, self.total_reward, flag_success)\n self.state = next_state\n if done:\n self.step_t = 0\n return done", "def step(self, action):\n x, y = self._move(action, *self._currentPos)\n\n if chr(self._grid[x, y]) == CASE_TYPES.Wall:\n # error - previous state was already a wall\n self._done = True\n self._trajectory.append(self._currentPos)\n return self._currentPos, -1, self._done, {}\n\n reward = {\n CASE_TYPES.Water: self.waterReward,\n CASE_TYPES.Sand: self.sandReward,\n CASE_TYPES.Open: self.stepReward,\n CASE_TYPES.Termination: self.successReward,\n CASE_TYPES.Trap: (\n -(self.maxSteps - len(self._trajectory)) + self.failureReward +\n self.trapReward)\n }[chr(self._grid[x, y])]\n\n # termination state\n if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:\n self._done = True\n\n self._currentPos = (x, y)\n\n self._trajectory.append(self._currentPos)\n self._nbSteps += 1\n\n if self._nbSteps >= self.maxSteps and not self._done:\n reward += self.failureReward\n\n return self._currentPos, reward, self._done, {}", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def next(self):\n while not self.is_stable():\n self.step()", "def step(self, message=None):\n self.current_step += 1\n if self.current_step <= self.total_steps:\n self.run_callbacks(message=message)\n if self.parent:\n self.parent.step(message)\n else:\n self.current_step -= 1\n raise StopIteration", "def step(self, memories):\n return", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def run(self, p):\n while self.state < 3:\n self.__step(p)", "def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)", "def _step(self, a):\n obs, rew, done, info = super()._step(a)\n # if self.robot.body_xyz[0] > self.threshold:\n # rew = 1.0\n # self.threshold += 1\n # else:\n # rew = 0.0\n # self.steps += 1\n # if self.steps > self.max_episode_steps:\n # done = True\n return obs, rew, done, info", "def step(self, action):\n state, reward, done, 
debug_info = self.sample_transition(action)\n self.set_state(state)\n if \"next_state_heuristic\" in debug_info:\n self._current_heuristic = debug_info[\"next_state_heuristic\"]\n return state, reward, done, debug_info", "def _on_step(self):\n # self.logger.record(\"current_reward\")\n # self.n_calls is automatically updated because\n # we derive from BaseCallback\n if self.n_calls % self.eval_freq == 0:\n # === YOUR CODE HERE ===#\n # Evaluate the agent:\n # you need to do self.n_eval_episodes loop using self.eval_env\n # hint: you can use self.model.predict(obs, deterministic=True)\n mean_reward, std_reward = evaluate_policy(self.model, self.eval_env, n_eval_episodes=self.n_eval_episodes)\n # Save the latest agent\n self.logger.record(\"eval_mean_reward\", mean_reward)\n self.model.save(self.save_latest)\n # and update self.best_mean_reward\n if mean_reward > self.best_mean_reward:\n self.best_mean_reward = mean_reward\n self.model.save(self.save_path)\n if self.verbose > 0:\n print(\"Saving new best model at {} timesteps\".format(self.n_calls))\n print(\"Saving new best model to {}.zip\".format(self.save_best))\n \n print(\"Best mean reward: {:.2f}\".format(self.best_mean_reward))\n \n\n # ====================== # \n return True", "def _step(self, action):\n transitions = self.query_model(self.s, action)\n prob, next_s, r, is_terminal = transitions[categorical_sample(\n (t[0] for t in transitions), self.rng)]\n\n next_s = np.asarray(next_s)\n for i in range(len(self.s) - 1):\n if next_s[i+1] < self.observation_space.high[i+1]:\n p = self.p_add[i]\n if(categorical_sample([p, 1-p], self.rng) == 0):\n next_s[i+1] += 1\n\n self.s = tuple(next_s)\n self.lastaction = action\n return (next_s, r, is_terminal, {\"prob\": prob})", "def _step(self, a):\n obs, rew, done, info = super()._step(a)\n # rew = +1 if past int threshold for first time in episode\n # if self.robot.body_xyz[0] > self.threshold:\n # self.threshold += 1\n # rew = 1.0\n # else:\n # rew = 0.0\n # self.steps += 1\n # if self.steps > self.max_episode_steps:\n # done = True\n return obs, rew, done, info", "def step(self, model):\n pass", "def step(self, model):\n pass", "def record(self, step):", "def step(self, action):\n x, y = self.state_to_coord(self.current_state)\n if action == self.actions['up']:\n possible_next_state = self.coord_to_state(x - 1, y)\n if x - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n elif possible_next_state in self.goal_states:\n result = possible_next_state, self.goal_reward, True\n else:\n result = possible_next_state, self.step_reward, False\n elif action == self.actions['right']:\n possible_next_state = self.coord_to_state(x, y + 1)\n if y + 1 >= self.columns or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['left']:\n possible_next_state = self.coord_to_state(x, y - 1)\n if y - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['down']:\n possible_next_state = self.coord_to_state(x + 1, y)\n if x + 1 >= self.rows or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n else:\n raise ValueError('Expected action value in {}, received {} in 
state {}'.\n format(self.actions, action, self.state_to_coord(self.current_state)))\n\n self.current_state = result[0]\n return result", "def step(self, action: nx.Graph):\n # Get the SMILES string associated with this action\n self._state = action\n if self.record_path:\n self._path.append(self._state)\n\n # Update the action space\n self.action_space.update_actions(self._state, self.observation_space)\n self._counter += 1\n\n # Check if we have finished\n # Out of steps or no more moves\n done = len(self.action_space.get_possible_actions()) == 0\n\n # Compute the fingerprints for the state\n return self._state, self.reward(), done, {}", "def step(self, action):\n raise NotImplementedError", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def cur_step(self):\n return self._cur_step", "def step(self, action):\n \n success = False\n self.curr_step += 1\n self._take_action(action)\n self._take_action(action)\n self._take_action(action)\n\n # initialize reward and get state \n reward = 0.0\n ob = self._get_state()\n\n # give dense rewards \n if not self.sparse_reward:\n reward = self._get_reward()\n\n # bad terminal conditions\n if self.curr_step >= self.max_steps \\\n or self.target_distance >= self.max_distance \\\n or self.mean_radius_sheep >= self.max_radius:\n self.finish = True\n if self.sparse_reward:\n reward = -1.0\n\n # good terminal conditions\n if self.target_distance <= 1.0:\n success = True\n self.finish = True\n if self.sparse_reward:\n reward = 1.0\n\n # update rl parameters\n self.episode_length += 1\n self.episode_reward += reward\n\n # generate info return parameter\n if self.info_mode == 1 and self.finish:\n info = {'r':self.episode_reward, 'l':self.episode_length, \n 's': success}\n else:\n info = {'n':self.num_sheep, 's': success}\n\n return ob, reward, self.finish, info", "def Next(self):\n next_task = fleetspeak_test_lib.PopMessage(self.client_id)\n if next_task is None:\n return False\n\n try:\n responses = self.client_mock.HandleMessage(next_task)\n except Exception as e: # pylint: disable=broad-except\n logging.exception(\"Error %s occurred in client\", e)\n responses = [\n self.client_mock.GenerateStatusMessage(\n next_task, 1, status=\"GENERIC_ERROR\")\n ]\n\n # Now insert those on the flow state queue\n for response in responses:\n self.PushToStateQueue(response)\n\n return True", "def __step(self, p):\n action = self.__action(p)\n temp_state = self.state\n\n if self.state == 0:\n if action == 1:\n self.state += 1\n elif self.state == 1:\n if action == 1:\n self.state -= 1\n else:\n self.state += 1\n else:\n if action == 1:\n self.state += 1\n else:\n self.state -= 1\n \n self.trajectory.append([temp_state, action, self.__reward(self.state)])", "def advance(self) -> None:\n pass", "def step(self, actions, *args, **kwargs):\n self.step_async(actions, *args, **kwargs)\n return self.step_wait()", "def nextStep(self):\n next_step = self.current_step + 1\n if next_step > self.total_steps:\n return\n\n self.current_chunk_size = next_step - sum(self.chunk_percentage)\n self.current_step = next_step", "def step(self, action):\n self.game.play_single_turn([action])\n next_obs = self.get_state()\n reward = self.game.player_1.score - self.game.turn_count\n done = self.game.check_for_end_of_game() or self.game.turn_count > 25\n\n if done:\n if self.game.player_1.score > self.game.player_2.score:\n reward += 25\n elif self.game.player_2.score > self.game.player_1.score:\n reward -= 25\n\n if self.game.save_images_suffix:\n 
image_suffix = f'{self.game.save_images_suffix}_{self.game.turn_count+1}'\n self.game.game_board.graphical_output(save=True, display=False, image_suffix=image_suffix)\n\n return next_obs, reward/100, done", "def active_result(self):\n return self.step_client.previous_step_result()", "def get_steps_num():\n return 0", "def teleop_step(self):\n # get current state\n state = self.panda.state\n self.step_number += 1\n\n return_state = self.return_state()\n\n # read in from keyboard\n key_input = self.key.get_controller_state()\n dpos, dquat, grasp, reset = (\n key_input[\"dpos\"],\n key_input[\"dquat\"],\n key_input[\"grasp\"],\n key_input[\"reset\"],\n )\n action = dpos\n\n # action in this example is the end-effector velocity\n self.panda.step(dposition=dpos, dquaternion=dquat, grasp_open=not self.grasp)\n\n # take simulation step\n p.stepSimulation()\n\n # return next_state, reward, done, info\n next_state = self.panda.state\n return_next_state = self.return_state()\n reward, done = self.calculate_reward(next_state, action)\n print(f'step: {self.step_number}\\treward: {reward}\\tdone: {done}')\n if reset:\n done = True\n info = self.panda.state\n\n return return_state, action, reward, return_next_state, done, info", "def step(self, action):\n obs, r, done, info = self.env.step(action)\n obs = self.get_observation(obs)\n return obs, r, self.is_done(), info", "def previous_step_result(self):\n return self._previous_step_result", "def next_step():\n global fenetre, grid\n if len(l)>2:\n board_display(l[0])\n l.pop(0)\n steps.set(str(len(l))+\" steps remaining\")\n elif len(l)==2:\n board_display(l[0])\n l.pop(0)\n steps.set(str(len(l))+\" step remaining\")\n else:\n board_display(l[0])\n steps.set(\"No more steps remaining\")\n btn.set(\"Finished!\")", "def perform_step(self, action):\n pass", "def step(self, action):\n raise NotImplementedError()" ]
[ "0.74529886", "0.74457574", "0.7440825", "0.73785466", "0.7311282", "0.70527315", "0.7038946", "0.68671244", "0.6764548", "0.67290527", "0.66969025", "0.6665891", "0.6658122", "0.6636712", "0.6608603", "0.66074103", "0.6606269", "0.6533289", "0.6533289", "0.6533289", "0.6471656", "0.6453098", "0.6450727", "0.6409392", "0.6397919", "0.6373048", "0.63702756", "0.6335034", "0.6294615", "0.6232808", "0.62011874", "0.61978614", "0.6173857", "0.6154893", "0.6152787", "0.6148168", "0.61471045", "0.61420226", "0.61302656", "0.61302656", "0.61166227", "0.6108648", "0.6091417", "0.6091417", "0.60863256", "0.6084612", "0.60583776", "0.60559773", "0.60065174", "0.6004094", "0.6002536", "0.5996403", "0.59855014", "0.5984574", "0.59812003", "0.59648865", "0.5962763", "0.5952719", "0.59387124", "0.59351856", "0.59233665", "0.59150726", "0.59007597", "0.58962196", "0.58962196", "0.58962196", "0.58962196", "0.58835405", "0.58795244", "0.5876208", "0.5876151", "0.58682907", "0.5867081", "0.586232", "0.5857593", "0.5856885", "0.5854475", "0.58417094", "0.5831465", "0.5831465", "0.5830839", "0.58288205", "0.58236945", "0.58099884", "0.58066815", "0.5803561", "0.58025336", "0.57996655", "0.5797421", "0.57963353", "0.5790281", "0.57900566", "0.578592", "0.5779826", "0.5756816", "0.5752532", "0.57496274", "0.57485425", "0.5746167", "0.5745307", "0.57436365" ]
0.0
-1
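A minimal sketch of the two-pass callback pattern described in the query above, assuming the same oauth2client helpers that appear in this record's document field (flow_from_clientsecrets, step1_get_authorize_url, step2_exchange) and a Flask session; the route name, secret key, client-secrets filename, and scope below are illustrative stand-ins, not values taken from the record.

import flask
from oauth2client import client

app = flask.Flask(__name__)
app.secret_key = 'replace-me'  # required for flask.session (assumed value)

@app.route('/oauth2callback')
def oauth2callback_sketch():
    # Build the flow each time; redirect_uri points back at this same route,
    # which is why the function is entered more than once.
    flow = client.flow_from_clientsecrets(
        'client_secret.json',  # assumed filename
        scope='https://www.googleapis.com/auth/calendar.readonly',  # assumed scope
        redirect_uri=flask.url_for('oauth2callback_sketch', _external=True))
    if 'code' not in flask.request.args:
        # First entry: step 1 -- send the browser to the consent screen.
        return flask.redirect(flow.step1_get_authorize_url())
    # Second entry: Google redirected back with ?code=... -- step 2 exchanges
    # that code for credentials, which are stashed in the session.
    credentials = flow.step2_exchange(flask.request.args.get('code'))
    flask.session['credentials'] = credentials.to_json()
    # The record's version redirects to its 'choose' page at this point.
    return 'credentials stored in session'

Using one route for both passes keeps things simple because the redirect URI has to be registered with the OAuth provider; reusing the same endpoint means only one callback URL ever needs to be registered.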
User chose a date range with the bootstrap daterange widget.
def setrange(): app.logger.debug("Entering setrange") flask.flash("Setrange gave us '{}'".format( request.form.get('daterange'))) daterange = request.form.get('daterange') flask.session['daterange'] = daterange daterange_parts = daterange.split() flask.session['begin_date'] = interpret_date(daterange_parts[0]) flask.session['end_date'] = interpret_date(daterange_parts[2]) app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format( daterange_parts[0], daterange_parts[1], flask.session['begin_date'], flask.session['end_date'])) return flask.redirect(flask.url_for("choose"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_date_range(self, range):\n\n date_range_string = \"{0} to {1}\".format(\n self.fmt_date(range.start_date), self.fmt_date(range.end_date)\n )\n log.info(\"Specifying a date range of: {0}\".format(date_range_string))\n\n # Enter the specified date range\n selector = self._driver.find_element_by_css_selector(self.DateRangeSelector)\n selector.clear()\n selector.send_keys(date_range_string)", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def select_date_range_option(self):\n log.info(\"Selecting the custom date range option.\")\n try:\n self._driver.find_element_by_css_selector(self.DateRangeRadioButton).click()\n except ElementNotVisibleException:\n # This seems to happen if you click the radio button\n # after it has been selected. So we eat this exception,\n # since the desired condition is likely satisfied.\n pass", "def select_date_interval_menu():\n while True:\n start_date = input('\\nInput desired start date with format dd-mm-yyyy:\\n')\n try:\n start_date = datetime.strptime(start_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid start date selected')\n while True:\n end_date = input('\\nInput desired start date with format dd-mm-yyyy,\\nor hit enter to select todays date\\n')\n if end_date == '':\n end_date = date.today()\n break\n else:\n try:\n end_date = datetime.strptime(end_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid end date selected')\n list_of_dates = pd.date_range(start_date, end_date, freq='d')\n list_of_dates = [i.strftime('%d%m%Y') for i in list_of_dates]\n return list_of_dates", "def setrange():\n app.logger.debug(\"Entering setrange\")\n daterange = request.form.get('daterange')\n flask.session['daterange'] = daterange\n daterange_parts = daterange.split()\n flask.session['begin_date'] = interpret_date(daterange_parts[0])\n flask.session['end_date'] = interpret_date(daterange_parts[2])\n app.logger.debug(\"Setrange parsed {} - {} dates as {} - {}\".format(\n daterange_parts[0], daterange_parts[1],\n flask.session['begin_date'], flask.session['end_date']))\n startingBound = request.form.get('StartTime')\n endingBound = request.form.get('EndTime')\n flask.session['startInput'] = startingBound\n flask.session['endInput'] = endingBound\n\n userTimezone = request.form.get('timezone')\n\n\n\n return flask.redirect(flask.url_for(\"choose\", userTimezone=userTimezone))", "def rangeselector_date():\n return {\n \"bgcolor\": \"rgb(35, 149, 86)\",\n \"activecolor\": \"rgb(25, 108, 62)\",\n \"buttons\": [\n {\"count\": 7, \"label\": \"1w\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 14, \"label\": \"2w\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 1, \"label\": \"1m\", \"step\": \"month\", \"stepmode\": \"backward\"},\n {\"count\": 3, \"label\": \"3m\", \"step\": \"month\", \"stepmode\": \"backward\"},\n {\"step\": \"all\"},\n ],\n }", "def adjust_date_range(driver, date_range):\n if date_range == 'All':\n return\n index = ['', 'All', '1', '2-7', '8-14', '15-30'].index(date_range)\n button_path = \"html/body/div[3]/div/div[2]/div[1]/div[4]/form/div/ul/li\" \\\n \"[3]/fieldset/button\"\n date_path = \"html/body/div[3]/div/div[2]/div[1]/div[4]/form/div/ul/li\" \\\n \"[3]/fieldset/div/ol/li[{}]/div/label\".format(index)\n attempts = 1\n while True:\n try:\n elem = driver.find_element_by_xpath(button_path)\n time.sleep(3)\n except Exception as e:\n 
attempts += 1\n if attempts > 25:\n break\n else:\n elem.click()\n time.sleep(3)\n driver.find_element_by_xpath(date_path).click()\n time.sleep(3)\n break", "def set_date_range(self, start_date, end_date):\n self._validate_date_range(start_date, end_date)\n self.start_date = pd.Timestamp(start_date)\n self.end_date = pd.Timestamp(end_date)", "def search_by_date_range(self, tl):\n print(\"Search by date range\")\n dates = input(\"Please use YYYYMMDD-YYYYMMDD for date range: \")\n date1_str, date2_str = dates.split('-')\n try:\n date1 = datetime.datetime.strptime(date1_str, utils.fmt)\n date2 = datetime.datetime.strptime(date2_str, utils.fmt)\n except ValueError as err:\n utils.print_error(err)\n return self.search_by_date_range(tl)\n else:\n return tl.findall_date_range(date1, date2)", "def range_callback(data):\n global D\n D.ranges = data.ranges", "def is_date_range(self, is_date_range):\n\n self._is_date_range = is_date_range", "def date_range_filter(dr):\n assert IDateRange.providedBy(dr) or IDateRangeFactory.providedBy(dr)\n if IDateRangeFactory.providedBy(dr):\n dr = dr(datetime.now())\n factory = queryUtility(IFactory, dottedname(IQueryFilter))\n if factory is None:\n return ComponentLookupError('cannot find factory for query filter')\n return factory(value=(dr.start, dr.end), query_range=dr.query_range)", "def visitRange(self, date):\n raise NotImplementedError()", "def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)", "def _select_date_changed(self):\n self.model.edit_traits(view=View(\n UCustom('date'),\n buttons=['OK'],\n title=u'数据生成日期选择',\n kind='panel',\n ))", "def fill_missing_date_range():\n pickle_dir ='/misc/yoda/www/plots/user/sheep'\n #pickle_dir = '/Users/ken/Downloads/sheep'\n drange = get_missing_date_range(pickle_dir)\n if drange:\n print 'fill date range', drange\n pickle_date_range(drange[0], drange[1])", "def get_date(prompt, title, min_date, max_date):\r\n question = prompt + ' Please select the year:'\r\n choices = [i for i in range(min_date.year, max_date.year + 1)]\r\n year = e.choicebox(question, title, choices)\r\n if year == None:\r\n raise QuitError\r\n else:\r\n year = int(year)\r\n question = 'Please select the month:'\r\n choices = [('0' + str(i))[-2:] for i in range(1, 13)]\r\n if min_date.year == max_date.year:\r\n choices = choices[min_date.month - 1: max_date.month]\r\n elif year == min_date.year:\r\n choices = choices[min_date.month - 1:]\r\n elif year == max_date.year:\r\n choices = choices[:max_date.month]\r\n month = e.choicebox(question, title, choices)\r\n if month == None:\r\n raise QuitError\r\n else:\r\n month = int(month)\r\n question = 'Please select the day:'\r\n month_length = c.monthrange(year, month)[1]\r\n choices = [('0' + str(i))[-2:] for i in range(1, month_length + 1)]\r\n if (min_date.year, min_date.month) == (max_date.year, max_date.month):\r\n choices = choices[min_date.day - 1: max_date.day]\r\n elif (year, month) == (min_date.year, min_date.month):\r\n choices = choices[min_date.day - 1:]\r\n elif (year, month) == (max_date.year, max_date.month):\r\n choices = choices[:max_date.day]\r\n day = e.choicebox(question, title, choices)\r\n if day == None:\r\n raise QuitError\r\n else:\r\n day = int(day)\r\n return d.date(year, month, day)", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, 
start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def form_SelectChoiceDate(request):\n schema = schemaish.Structure()\n schema.add('myDateSelect', schemaish.Date())\n options = [(datetime.date(1970,1,1),'a'),(datetime.date(1980,1,1),'b'),(datetime.date(1990,1,1),'c')]\n\n form = formish.Form(schema, 'form')\n form['myDateSelect'].widget = formish.SelectChoice(options)\n return form", "def date_range(self, start, end, check_date):\n if start <= end:\n return start <= check_date <= end\n else:\n return start <= check_date or check_date <= end", "def limit_date_range_to(self):\n return self._limit_date_range_to", "def create_date_list(start_date = start_date, end_date = end_date):", "def date_range(start, end):\n session = Session(engine)\n \n sel = [func.min(measurement.tobs),\n func.max(measurement.tobs),\n func.avg(measurement.tobs)]\n \n range_data = session.query(*sel).\\\n filter(measurement.date >= start).\\\n filter(measurement.date <= end).all()\n \n session.close()\n \n range_x = list(np.ravel(range_data))\n\n return jsonify(range_x)", "def check_required_range(specific=None, begin=None, end=None):\n\n if not specific and not (begin and end):\n raise ValueError('You must pass some form of date filter')\n\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific dates')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for date range\")", "def preprocess_dates(args):\n if 'date' in args:\n if args.get('period') == 'range' and 'end_date' in args:\n args['date'] = '{},{}'.format(args['date'],\n args['end_date'])\n return args", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def _set_dates(self, case_date):\n d1 = case_date - timedelta(days=self.interval)\n e1 = case_date\n\n start_date_mdy = datetime.strftime(d1, \"%m/%d/%Y\")\n end_date_mdy = datetime.strftime(case_date, \"%m/%d/%Y\")\n start_date = str(\n {\n \"valueAsString\": f\"{d1}-00-00-00\",\n \"lastSetTextBoxValue\": f\"{start_date_mdy}\",\n }\n )\n end_date = str(\n {\n \"valueAsString\": f\"{e1}-00-00-00\",\n \"lastSetTextBoxValue\": f\"{end_date_mdy}\",\n }\n )\n self.data[f\"{self.x}$startDate$dateInput\"] = start_date_mdy\n self.data[f\"{self.x}$endDate$dateInput\"] = end_date_mdy\n self.data[f\"{self.y}_startDate_dateInput_ClientState\"] = start_date\n self.data[f\"{self.y}_endDate_dateInput_ClientState\"] = end_date\n self.data[f\"{self.x}$btnSearch\"] = \"Search\"\n self.data[\n f\"{self.x}$radGridOpinions$ctl00$ctl03$ctl01$PageSizeComboBox\"\n ] = \"20\"", "def showSelectedDate(self):\n pass", "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def _date_range(start: str, end: str) -> List[str]:\n start_dt = _parse_ISO8601_date(start)\n end_dt = _parse_ISO8601_date(end)\n if start_dt > end_dt:\n raise ValidationError(\n \"Start date needs to be greater than or equal end date.\"\n )\n if (\n start_dt < _parse_ISO8601_date('1900') or\n end_dt > datetime.datetime.now().astimezone()\n ):\n raise ValidationError(\n \"Start date needs to be less than 1900-01-01T00:00:00Z and end\"\n \" date can't be from the feature.\"\n )\n return map(lambda date: date.isoformat(), rrule(\n freq=DAILY,\n 
dtstart=start_dt,\n until=end_dt,\n cache=True\n ))", "def filter_datetime_range(self, queryobject, start_datetime, end_datetime):\n raise NotImplementedError()", "def set_range(self, start=None, end=None, occurrences=None):\n if start is None:\n if self.__start_date is None:\n self.__start_date = dt.date.today()\n else:\n self.start_date = start\n\n if end:\n self.end_date = end\n elif occurrences:\n self.__occurrences = occurrences\n self._track_changes()", "def limit_date_range_from(self):\n return self._limit_date_range_from", "def __init__(self, start_date_str: str, end_date_str: str):\r\n start_date, end_date = create_date_from_string(start_date_str, end_date_str)\r\n if is_date_valid(start_date, end_date):\r\n self.days_range_array = create_days_range(start_date, end_date)\r\n self.months_range_array = create_months_range(self.days_range_array)\r\n else:\r\n raise Exception", "def create_daterange(start_date, end_date, freq, update=True):\n if update:\n start_date, end_date = update_dates(start_date, end_date, freq)\n\n return pd.date_range(start_date, end_date, freq=freq)", "def test_charter_form_date_widgets(self):\n\n cf = CharterForm()\n self.assertIsInstance(cf.fields[\"start_date\"].widget, SelectDateWidget)\n self.assertIsInstance(cf.fields[\"end_date\"].widget, SelectDateWidget)\n self.assertEqual(cf.fields[\"start_date\"].widget.attrs[\"class\"], \"date-input\")\n self.assertEqual(cf.fields[\"end_date\"].widget.attrs[\"class\"], \"date-input\")", "def do_date_range(parser, token):\r\n chunks = token.split_contents()\r\n if not len(chunks) >= 3:\r\n raise template.TemplateSyntaxError, \"%r tag requires two or three arguments\" % token.contents.split()[0]\r\n if not len(chunks) <=4 :\r\n raise template.TemplateSyntaxError, \"%r tag requires two or three arguments\" % token.contents.split()[0]\r\n if len(chunks) == 4:\r\n format = chunks[3]\r\n else:\r\n format = \"\"\r\n return DateRangeNode(chunks[1],chunks[2],format)", "def test_search_date_range_returns_correct_menu(self):\n # add some data to the database\n test_employee = [\n {'id': 1, 'name': \"Test Employee 1\"},\n ]\n test_log_entry_dates = [\n datetime.date(2018, 1, 1),\n datetime.date(2018, 1, 2),\n datetime.date(2018, 3, 4),\n datetime.date(2018, 5, 6),\n datetime.date(2018, 5, 7),\n ]\n e = db_manager.Employee.get_or_create(name=test_employee[0]['name'])\n # create some log entries\n for date in test_log_entry_dates:\n db_manager.LogEntry.create(\n employee=e[0],\n date=date,\n task_name='Test task for date {}'.format(date),\n duration=10,\n notes='Note'\n )\n\n start_index = 1\n end_index = -2\n\n fmt = \"%Y-%m-%d\"\n start_date_string = test_log_entry_dates[start_index].strftime(fmt)\n end_date_string = test_log_entry_dates[end_index].strftime(\"%Y-%m-%d\")\n user_inputs = [\n start_date_string,\n end_date_string\n ]\n\n with patch('builtins.input', side_effect=user_inputs):\n result = self.menu.search_date_range()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)", "def get_user_input():\n # Gets user input in M\\nD\\nYYYY format for the start date\n start_instrings = [\"Enter start month: \",\n \"Enter start day: \", \"Enter start year: \"]\n raw_start_date = tuple(input(s) for s in start_instrings)\n # Gets user input in M\\nD\\nYYYY format for the end date\n end_instrings = [\"Enter end month: \",\n \"Enter end day: \", \"Enter end year: \"]\n raw_end_date = tuple(input(s) for s in end_instrings)\n\n # Uses map to convert string input to integers and stores the 
values in a tuple\n start_date = tuple(map(int, raw_start_date))\n end_date = tuple(map(int, raw_end_date))\n\n # Checks if each year is within the date limit\n if not(1971 <= start_date[2] <= 2020 and 1971 <= end_date[2] <= 2020):\n raise Exception(\"Input date/s outside date limit.\")\n\n # Cyclic rotation of elements (because I really really **really** want to unpack)\n # Source: https://www.geeksforgeeks.org/python-shift-last-element-to-first-position-in-list/\n start_date, end_date = start_date[-1:] + \\\n start_date[:-1], end_date[-1:] + end_date[:-1]\n\n # As you can see unpacking makes the line smaller and more readable\n # return DateRange(datetime.date(start_date[2], start_date[0], start_date[1]), datetime.date(end_date[2], end_date[0], end_date[1]))\n return DateRange(datetime.date(*start_date), datetime.date(*end_date))", "def parse_range(option):\n return {\"range\": timedelta(days=option)}", "def other_date(self, bot, update):\n logger.info(\"PICK OTHER DATE\")\n update.message.reply_text(\"Please select a date: \",\n reply_markup=telegramcalendar.create_calendar())\n\n return 2", "def slice(self, start_date, end_date = None):\n\n if end_date is None:\n end_date = self.series.index[-1]\n self.series = self.series.loc[start_date:end_date]", "def onchange_start_date(self, start_date=False):\n if not start_date:\n return {}\n result = {'value': {'last_renovation_date': start_date}}\n return result", "def range_date():\n # Query all stations within a certain range\n data = [Measurement.date, func.max(Measurement.tobs), func.min(Measurement.tobs), func.avg(Measurement.tobs)]\n qry = session.query(*data).filter(Measurement.date.between('2014-01-17', '2017-01-01')).all()\n before_date = list(np.ravel(qry))\n\n return jsonify(before_date)", "def _restricted_dates(date):\n _dates = list(date)\n try:\n return_date = datetime.strptime(date, '%Y-%m-%d').date()\n # end_date = datetime.strptime(dates[1], '%Y-%m-%d').date()\n except ValueError:\n raise argparse.ArgumentTypeError(\n f\"Could not parse dates. Did you format them yyyy-mm-dd? 
Dates received:\\n{date}\")\n\n # if start_date > end_date:\n # raise argparse.ArgumentTypeError(\n # f\"Start date {start_date} may not be later than end date {end_date}\")\n # return [start_date, end_date, 55]\n return return_date", "def billing_choose_dates(self):\n number_of_dates_to_be_generated_per_patient = (\n self.number_of_dates_to_be_generated_per_patient\n )\n dunning_cycle_length = self.dunning_cycle_length\n dates = self.dates\n first_date = random.choice(\n dates\n ) # randomly choose a start date from the list of possible start dates\n last_possible_date = first_date + datetime.timedelta(\n days=dunning_cycle_length\n ) # calculate the last date possible based on Dunnin Cycle\n time_between_dates = last_possible_date - first_date\n subsequent_events = random.sample(\n list(np.arange(0, time_between_dates.days)),\n number_of_dates_to_be_generated_per_patient,\n )\n subsequent_events.sort()\n dates = [\n first_date + datetime.timedelta(days=np.int(subsequent_event))\n for subsequent_event in subsequent_events\n ]\n event_list = pd.DataFrame(dates)\n return event_list", "def date_range(start, end):\n r = (end + timedelta(days=1) - start).days\n return [start + timedelta(days=i) for i in range(r)]", "def year_range(df):\n\n if not isinstance(df, pd.DataFrame):\n print(\"year_range was not passed a pandas DataFrame.\")\n return\n\n df['year_start'] = df['year'].min()\n df['year_end'] = df['year'].max()\n df.drop('year' , axis = 1, inplace = True)\n return df", "def configure_date_type_question(self, question_data):\n self.driver.find_radio_button(DATE_RB).click()\n date_format = fetch_(DATE_FORMAT, from_(question_data))\n if (date_format == MM_YYYY):\n self.driver.find_radio_button(MONTH_YEAR_RB).click()\n elif (date_format == DD_MM_YYYY):\n self.driver.find_radio_button(DATE_MONTH_YEAR_RB).click()\n elif (date_format == MM_DD_YYYY):\n self.driver.find_radio_button(MONTH_DATE_YEAR_RB).click()\n return self", "def on_date_change(self):\n self.date = self.ui.calendarWidget.selectedDate()\n self.update_views()", "def df_range(self, value: int):\n self._df_range = value", "def _onchange_date_from(self):\n\t\tdate_from = self.date_from\n\t\tdate_to = self.date_to\n\t\tself.compute_valid_leaves_for_employee(date_from, date_to)\n\n\t\t# policy_id = self.env['leaves.policy'].sudo().search(\n\t\t# \t[('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# if date_from and not date_to:\n\t\t# \tdate_to_with_delta = fields.Datetime.from_string(date_from) + timedelta(hours=8)\n\t\t# \tself.date_to = str(date_to_with_delta)\n\t\t# \tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds()/(24*3600)\n\t\t# \tself.number_of_days_temp = number_of_day\n\t\t# # Compute and update the number of days\n\t\t# if (date_to and date_from) and (date_from <= date_to):\n\t\t# \tif policy_id:\n\t\t# \t\tfor val in policy_id:\n\t\t# \t\t\tnumber_of_days = 0\n\t\t# \t\t\tif val.weekends_leave_period == 'dont_count':\n\t\t# \t\t\t\tnum_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t#\n\t\t# \t\t\t\t# Logic of Public Holidays when week offs count as holidays is True 2019-11-19\n\t\t# \t\t\t\temp_shift = self.employee_id.resource_calendar_ids\n\t\t# \t\t\t\tglobal_leaves = 
emp_shift.global_leave_ids\n\t\t# \t\t\t\t# List to store the global leaves\n\t\t# \t\t\t\tpublic_holidays = []\n\t\t# \t\t\t\tfor holiday in global_leaves:\n\t\t# \t\t\t\t\tpublic_holidays.append((holiday.date_from, holiday.date_to))\n\t\t#\n\t\t# \t\t\t\t# Public holidays between leave period\n\t\t# \t\t\t\tleave_period_dates = []\n\t\t# \t\t\t\tstart_date = date_from1.date()\n\t\t# \t\t\t\tend_date = date_to1.date()\n\t\t# \t\t\t\tdelta = end_date - start_date\n\t\t# \t\t\t\tfor i in range(delta.days + 1):\n\t\t# \t\t\t\t\tday = start_date + timedelta(days=i)\n\t\t# \t\t\t\t\tleave_period_dates.append(day)\n\t\t# \t\t\t\tcount = 0\n\t\t# \t\t\t\tfor date in public_holidays:\n\t\t# \t\t\t\t\tif datetime.strptime(date[0], '%Y-%m-%d %H:%M:%S').date() in leave_period_dates:\n\t\t# \t\t\t\t\t\tcount += 1\n\t\t# \t\t\t# End of Public Holidays logic\n\t\t#\n\t\t# \t\t\t\tself.number_of_days_temp = num_days - count\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tnumber_of_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tif val.dur_full and not val.dur_half:\n\t\t# \t\t\t\t\ttotal_days = (date_to1 - date_from1).days\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\ttotal_seconds = (date_to1 - date_from1).seconds\n\t\t# \t\t\t\t\ttotal_days = total_seconds / (24 * 3600)\n\t\t#\n\t\t# \t\t\t\tweek_offs = total_days - number_of_days\n\t\t# \t\t\t\tself.number_of_days_temp = number_of_days + week_offs\n\t\t# \telse:\n\t\t# \t\t# self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(\n\t\t# \t\t\tdate_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds() / (24 * 3600)\n\t\t# \t\tself.number_of_days_temp = number_of_day\n\t\t#\n\t\t# elif (date_to and date_from) and (date_from > date_to):\n\t\t# \traise ValidationError(\"From Date cannot be greater then To Date\")\n\t\t# else:\n\t\t# \tself.number_of_days_temp = 0", "def read_day_range(where):\n\twhile True:\n\t\tif (where == 'start'):\n\t\t\t_day = input(\"Introduceti ziua de inceput: \")\n\t\telif (where == 'end'):\n\t\t\t_day = input(\"Introduceti ziua de sfarsit: \")\n\t\telse:\n\t\t\traise NameError\n\t\ttry:\n\t\t\t_day = int(_day)\n\t\t\tif (not is_in_range(_day, 0, VALID_DAY)):\n\t\t\t\tprint(\"Ziua invalida.\")\t\n\t\t\telse:\n\t\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Ziua invalida, introduceti un intreg.\")\n\treturn (_day)", "def compute_daterange(df: pd.DataFrame):\n\n start_date = df[\"Date\"].iloc[0]\n end_date = df[\"Date\"].iloc[-1]\n return pd.date_range(start_date, end_date)", "def date_range_validator(input_date_str, from_date_str, to_date_str, pattern=\"%d-%m-%Y\"):\n import sys\n from datetime import datetime\n try:\n input_date = datetime.strptime(input_date_str, pattern).date()\n from_date = datetime.strptime(from_date_str, pattern).date()\n to_date = datetime.strptime(to_date_str, pattern).date()\n if from_date <= input_date and input_date <= to_date:\n return True\n else:\n return False\n except Exception as E:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(f\"date_range_validator Error: {E} at {exc_tb.tb_lineno}, Exception Type: {exc_type}\")\n return False", "def _get_input_date_range_for(self, from_output_dt, to_output_dt):\n # If comb is adaptive, the required input date range needs to account 
for the time window\n if self.is_adaptive:\n if from_output_dt is None:\n return from_output_dt, to_output_dt\n return from_output_dt-timedelta(days=self.time_window), to_output_dt\n # Otherwise, the comb is already trained and does not need to fill up the time window first\n return from_output_dt, to_output_dt", "def conflicts(request):\n\n form = ConflictsForm(request.GET)\n if form.is_valid():\n beg_date = form.cleaned_data['beg_date']\n end_date = form.cleaned_data['end_date']\n else:\n beg_date, end_date = get_week_range_by_date(datetime.datetime.today())\n\n terms = Term.prepare_conflict_dict(beg_date, end_date)\n title = 'Konflikty'\n return TemplateResponse(request, 'schedule/conflicts.html', locals())", "def date_range(start_date, end_date):\n return [start_date + timedelta(x) for x in range((end_date - start_date).days + 1)]", "def setRange(self, x_range, y_range):\n pass", "def test_date_interval(self, init_date, end_date):\n self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def search_by_range(s, start_date=None, start_time=None, end_date=None,\n end_time=None):\n\n url = 'https://enregistreur.prosodie.com/odigo4isRecorder/' \\\n 'EntryPoint?serviceName=CriteresMessagesHandler&lang=en'\n s.driver.get(url)\n if start_date:\n s.driver.ensure_element_by_name('dateDebut').send_keys(start_date)\n if start_time:\n s.driver.ensure_element_by_name('heureDebut').send_keys(start_time)\n if end_date:\n s.driver.ensure_element_by_name('dateFin').send_keys(end_date)\n if end_time:\n s.driver.ensure_element_by_name('heureFin').send_keys(end_time)\n s.driver.ensure_element_by_id('button-1009').click()\n return s", "def update_treasury_date_dropdown(add_button, date, existing_dates):\n date = app_obj.utils.parse_date(date).date()\n\n existing_dates_lst = [str(d[\"value\"]) for d in existing_dates]\n\n if str(date) not in existing_dates_lst:\n existing_dates.append({'label': date, 'value': date})\n\n return existing_dates", "def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)", "def get_date_range():\n start_date = request.args.get(\"start\", default=None, type=str)\n start_date = datetime.datetime.fromisoformat(start_date)\n end_date = request.args.get(\"end\", default=None, type=str)\n end_date = datetime.datetime.fromisoformat(end_date)\n\n animals = []\n for key in rd.keys(\"*\"):\n animal = json.loads(rd.get(key))\n if (\n start_date\n <= datetime.datetime.fromisoformat(animal[\"created-on\"])\n <= end_date\n ):\n animals.append(animal)\n\n return jsonify(animals)", "def limit_date_range_from(self, limit_date_range_from):\n\n self._limit_date_range_from = limit_date_range_from", "def set_start_date(self, start_date):\n self.set_value_into_input_field(self.start_date_inputbox_locator, start_date)", "def daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)+1):\n yield start_date + timedelta(n)", "def dtd(cls, date: Date) -> \"DateRange\":\n return cls(date, date)", "def search_date():\n while True:\n clear()\n print(dedent(\"\"\"\n What do you want to do? 
Enter a or b.\n a) Choose from a list of dates\n b) Search by a date range\n c) Return to search menu\n \"\"\"))\n choice = input(\"> \")\n if choice == \"a\":\n work_log.multiple_matches(type='date')\n elif choice == \"b\":\n work_log.search_date_range()\n elif choice == \"c\":\n break\n else:\n print(\"Please enter a valid choice\")\n time.sleep(3)", "def test_range__no_end_date(self):\n data = self._data()\n data.pop('end_date')\n response = self._get(get_kwargs=data)\n self._check_response(response, 104)", "def __rangeChanged(self, first, second):\n tooltip = \"Histogram range:\\n[%g, %g]\" % (first, second)\n self.__rangeSlider.setToolTip(tooltip)\n self.__rangeLabel.setToolTip(tooltip)", "def set_begin_date(self, begin_date):\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)", "def get_day_range(a, b, date_format='%Y-%m-%d'):\n today = datetime.datetime.now().date()\n res = [today + datetime.timedelta(days=a), today + datetime.timedelta(days=b)]\n\n if date_format is None:\n return res\n return [datetime.datetime.strftime(x, date_format) for x in res]", "def limit_date_range_to(self, limit_date_range_to):\n\n self._limit_date_range_to = limit_date_range_to", "def date_in_range(start, end, x):\n\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "def daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + dt.timedelta(n)", "def daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)+1):\n yield start_date + dt.timedelta(n)", "def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates", "def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates", "def get_date_range(startdate, enddate):\n if enddate < startdate:\n raise Exception(\"Passed in enddate that was before start date, did you flip your variables around?\")\n \n if isinstance(startdate, datetime.datetime): startdate = startdate.date()\n if isinstance(enddate, datetime.datetime): enddate = enddate.date()\n \n totalspan = enddate-startdate\n return [startdate + timedelta(days=day) for day in range(0, totalspan.days+1)]", "def dateRange(self,str_start_time,str_end_time):\n\t\ttmp = str_start_time.split('-')\n\t\ttmp1 = str_end_time.split('-')\n\t\tstart_time = datetime.datetime(int(tmp[0]),int(tmp[1]),int(tmp[2]))\n\t\tend_time = datetime.datetime(int(tmp1[0]),int(tmp1[1]),int(tmp1[2]))\n\t\tfor n in range(int((end_time-start_time).days)):\n\t\t\tyield start_time + datetime.timedelta(n)", "def get_dates(self, candidates=None, start=None, end=None):\n if candidates is not None:\n return [date for date in candidates if date in self.data]\n if start is None:\n start = self.first_date\n if end is None:\n end = self.last_date\n return [date for date in self.data if start <= date <= end]", "def date_range(all_files,start_year,start_month,start_day,end_year,end_month,\r\n end_day):\r\n\r\n d1 = date(start_year,start_month,start_day)\r\n d_last = date(end_year,end_month,end_day)\r\n day_range = (d_last - d1).days\r\n #print('day range: %s' %day_range)\r\n files = []\r\n for t in range(day_range):\r\n d2 = d1 + timedelta(t)\r\n d2_str1 = str(d2)\r\n d2_str2 = 
d2.strftime('%Y_%m_%d')\r\n # print(d2)\r\n for f in all_files:\r\n if d2_str1 in str(f) or d2_str2 in str(f):\r\n files.append(f)\r\n return(files)", "def pickDate(self,event=None):\r\n curLine = self.missingDates.getSelection() # Returns a string because missingDates is a listbox control, not a treeview\r\n# print(curLine)\r\n# newData=self.fetchJournalData(curLine)\r\n# self.setData(newData)\r\n self.date.setDateText(curLine)", "def test_date_range_reconcile(klass, datetime, tzutc):\n r = klass(title=\"Foo\")\n r.start_date = datetime(2016, 5, 21, 0, 0, 0)\n r.end_date = datetime(2016, 6, 21, 11, 59, 59)\n\n assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday\n assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday", "def dateB(self):\r\n self.date = self.cal.selectedDate()\r\n self.lineEditWidgets[\"CUMPLEAÑOS\"].setText(\r\n self.date.toString(\"yyyy-MM-dd\"))", "def test_range__bad_end_date(self):\n self.end_date = 'bad'\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 104)", "def print_choice_menu():\n month1 = input(\"Give the month as 4, 5, 6, 7, 8, or 9: \")\n print()\n day1 = input(\"Give the day as 1, 2, ..., 29, 30, or 31: \")\n print()\n\n try:\n month1 = int(month1)\n day1 = int(day1)\n\n if month1 in [4, 5, 6, 7, 8, 9]:\n if day1 in list(range(1, 32)):\n if day1 == 31 and month1 in [4, 6, 9]:\n print(\"Invalid Date!\")\n sleep(2)\n return 0, 0\n elif month1 > datetime.now().month or (month1 == datetime.now().month and day1 > datetime.now().day):\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n else:\n sleep(2)\n return month1, day1\n else:\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n else:\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n\n except ValueError:\n print(\"User Error\")\n sleep(2)\n return 0, 0", "def complete_form_and_download(self, start: datetime, end: datetime) -> str:\n log.info(\"---------------\")\n self._click_range_button()\n self._enter_start_date(start)\n self._enter_end_date(end)\n self._submit_form()\n return self._export_data()", "def get_daterange(daterange: List[str]) -> Tuple[dt.datetime, dt.datetime]:\n if daterange is None:\n dt_start = dt.datetime.strptime(\"2017-01-01\", \"%Y-%m-%d\")\n dt_end = dt.datetime.now()\n else:\n dt_start = parse_date(daterange[0])\n if len(daterange) == 1:\n dt_end = parse_date(None)\n else:\n dt_end = parse_date(daterange[1])\n\n return dt_start, dt_end", "def range_temp(start,end):\n year, month, date = map(int, start.split('-'))\n date_start = dt.date(year,month,day)\n year2, month2, date2 = map(int, end.split('-'))\n date_end = dt.date(year2,month2,day2)\n # Query for tobs for definied date range\n results = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs).\\\n func.avg(Measurement.tobs)).filter(Measurement.date >= date_start).filter(Measurement.date <= date_end).all()\n data = list(np.ravel(results))\n return jsonify(data)", "def _updateDisplayRange(self, dmin, dmax):\n self._wmin.setText(\"%.4g\" % dmin)\n self._wmax.setText(\"%.4g\" % dmax)\n self._updateFullRangeIcon()", "def test_date_range_fields():\n now = datetime.datetime(2017, 6, 13, 9, 44, 31, 62870)\n fields = {\n 'estimated_land_date_after': now,\n 'estimated_land_date_before': now,\n 'adviser.id': 1234,\n }\n\n filters, ranges = _split_range_fields(fields)\n\n assert filters == {\n 'adviser.id': 1234,\n }\n assert ranges == {\n 'estimated_land_date': {\n 'gte': now,\n 'lte': now,\n },\n }", "def 
set_begin_date_for_search(self, begin_date):\n self.set_value_into_input_field(self.begin_date_locator, begin_date)", "def get_slider():\n return dcc.RangeSlider(\n id='hours',\n value=[0, 23],\n min=0,\n max=23,\n marks={i: str(i) for i in range(0, 24, 3)}\n )", "def set_from_date(self, date):\n self.set_value_into_input_field(self.set_from_date_locator, date)", "def getSliderRange(*args):\n\n #get timeslider range start\n startF = cmds.playbackOptions(query=True, min=True)\n endF = cmds.playbackOptions(query=True, max=True)\n return(startF, endF)", "def update_swap_date_dropdown(add_button, date, existing_dates):\n date = app_obj.utils.parse_date(date).date()\n\n existing_dates_lst = [str(d[\"value\"]) for d in existing_dates]\n\n if str(date) not in existing_dates_lst:\n existing_dates.append({'label': date, 'value': date})\n\n return existing_dates", "def setSelectedDate(self, data):\n # print('setSelectedDate ', data)\n self.currentDate = data", "def t_range_years(t_range):\r\n start_year = int(t_range[0].split(\"-\")[0])\r\n end_year = int(t_range[1].split(\"-\")[0])\r\n end_month = int(t_range[1].split(\"-\")[1])\r\n end_day = int(t_range[1].split(\"-\")[2])\r\n if end_month == 1 and end_day == 1:\r\n year_range_list = np.arange(start_year, end_year)\r\n else:\r\n year_range_list = np.arange(start_year, end_year + 1)\r\n return year_range_list" ]
[ "0.7240307", "0.708021", "0.6923517", "0.66862506", "0.64068097", "0.6244564", "0.6082898", "0.6051793", "0.5973784", "0.5954892", "0.5869089", "0.57650083", "0.5739868", "0.57391983", "0.5736123", "0.57263255", "0.571437", "0.57027483", "0.56858015", "0.5682606", "0.5677092", "0.5654863", "0.5638502", "0.5601698", "0.556227", "0.5557863", "0.5530671", "0.5526246", "0.54746217", "0.54742545", "0.54735833", "0.545593", "0.5427912", "0.54083467", "0.5387834", "0.5332377", "0.5324697", "0.53225666", "0.5321833", "0.53025526", "0.53008986", "0.5300372", "0.5285869", "0.5285704", "0.52691793", "0.5226183", "0.52198565", "0.5208194", "0.5206005", "0.5202271", "0.5189119", "0.5186388", "0.5180977", "0.5168159", "0.5155941", "0.5148124", "0.51475745", "0.51464975", "0.5133132", "0.51257133", "0.51252276", "0.51250094", "0.51180935", "0.5115779", "0.51153046", "0.5114347", "0.51117486", "0.51070493", "0.5090799", "0.508632", "0.50786763", "0.5078285", "0.507602", "0.5065532", "0.5058686", "0.50558394", "0.5054498", "0.5042582", "0.5042582", "0.5042203", "0.50330687", "0.50251", "0.5018241", "0.500475", "0.5002987", "0.49964267", "0.49945432", "0.49904597", "0.4988507", "0.49812964", "0.4972386", "0.49710777", "0.49626175", "0.49579912", "0.49538586", "0.49508467", "0.4950413", "0.49482295", "0.49480426", "0.4945218" ]
0.67631936
3
Start with some reasonable defaults for date and time ranges. Note this must be run in app context ... can't call from main.
def init_session_values(): # Default date span = tomorrow to 1 week from now now = arrow.now('local') # We really should be using tz from browser tomorrow = now.replace(days=+1) nextweek = now.replace(days=+7) flask.session["begin_date"] = tomorrow.floor('day').isoformat() flask.session["end_date"] = nextweek.ceil('day').isoformat() flask.session["daterange"] = "{} - {}".format( tomorrow.format("MM/DD/YYYY"), nextweek.format("MM/DD/YYYY")) # Default time span each day, 8 to 5 flask.session["begin_time"] = interpret_time("9am") flask.session["end_time"] = interpret_time("5pm")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ConfigureDefaults(area_bounds=None, \n area_bounds_format=['x_min','y_min','x_max','y_max'], \n area_bounds_range=None, years_are_bounds=False,\n dates_are_bounds=False, init_date_str_format='%y%m%d',\n member_name='realization', period_name='time', \n initialistion_time_name='forecast_reference_time'): \n global default_area_bounds\n global default_area_bounds_format\n global default_area_bounds_range\n global default_years_are_bounds\n global default_dates_are_bounds\n global default_init_date_str_format\n global default_member_name\n global default_period_name\n global default_initialistion_time_name\n \n default_area_bounds = area_bounds\n default_area_bounds_format = area_bounds_format\n default_area_bounds_range = area_bounds_range\n default_years_are_bounds = years_are_bounds\n default_dates_are_bounds = dates_are_bounds\n default_init_date_str_format = init_date_str_format\n default_member_name = member_name\n default_period_name = period_name\n default_initialistion_time_name = initialistion_time_name", "def checkAndDefaultArgs(args):\n if not args.date:\n args.date = datetime.now().strftime(\"%d/%m/%Y\")\n\n if args.time_in:\n if \"am\" not in args.time_in and \"pm\" not in args.time_in:\n print(\"Invalid time_in string provided: Using default 1pm\")\n args.time_in = \"1pm\"\n else:\n args.time_in = \"1pm\"\n\n if args.time_out:\n if \"am\" not in args.time_out and \"pm\" not in args.time_out:\n print(\"Invalid time_out string provided: Using default 2pm\")\n args.time_out = \"2pm\"\n else:\n args.time_out = \"2pm\"", "def __init__(self,\n day=None,\n end_time=None,\n start_time=None,\n ):\n\n # Initialize members of the class\n self.day = day\n self.end_time = end_time\n self.start_time = start_time", "def _use_default_schedule(self):\n def gen_day():\n dl = []\n ll = [-1, '', -1, '', '']\n for i in range(8):\n dl.append(ll[:])\n rl = []\n for i in range(4):\n rl.append(dl[:])\n return rl\n\n self.schedule = {\n 'current_week': [1, date.today().isocalendar()[1]],\n 'lessons_time': [\n ['8:00', '9:35'],\n ['9:45', '11:20'],\n ['11:40', '13:15'],\n ['13:25', '15:00'],\n ['15:20', '16:55'],\n ['17:05', '18:40'],\n ['18:45', '20:20'],\n ['20:25', '22:00']\n ],\n 'schedule': {\n 'Monday': gen_day(),\n 'Tuesday': gen_day(),\n 'Wednesday': gen_day(),\n 'Thursday': gen_day(),\n 'Friday': gen_day(),\n 'Saturday': gen_day()\n },\n 'subgroup': 0\n }", "def __init__(__self__, *,\n end_date: str,\n start_date: str,\n time: str):\n pulumi.set(__self__, \"end_date\", end_date)\n pulumi.set(__self__, \"start_date\", start_date)\n pulumi.set(__self__, \"time\", time)", "def i_see_the_set_dates(_step):\r\n verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')\r\n verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')\r\n verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013')\r\n verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')\r\n\r\n verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)\r\n # Unset times get set to 12 AM once the corresponding date has been set.\r\n verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)", "def default(timeframe_from: int, timeframe_to: int,\n lines: List[List[List[str]]], times: List[List[datetime]],\n colored: bool) -> None:\n diff_seconds = [(stop - start).seconds for start, stop in times]\n total_total_seconds = sum(diff_seconds)\n avg_total_seconds = total_total_seconds // (timeframe_from - timeframe_to)\n\n 
if colored:\n print(f'Chosen display: {colors.FG.BRIGHT.RED}DEFAULT{colors.RESET}\\n')\n else:\n print('Chosen display: DEFAULT\\n')\n\n display_lines(lines, times, colored)\n display_summary('Average', avg_total_seconds, colored, ' per day')\n display_summary('Total', total_total_seconds, colored)", "def __init__(__self__, *,\n end_time: pulumi.Input[str],\n start_time: pulumi.Input[str]):\n pulumi.set(__self__, \"end_time\", end_time)\n pulumi.set(__self__, \"start_time\", start_time)", "def from_all(self):\n print ('\\nGuessing Date from Timestamp: ' + sys.argv[2] + '\\r')\n print ('Outputs which do not result in a date/time value are not displayed.\\r')\n print ('\\033[1;31mMost likely results (results within +/- 5 years) are highlighted.\\n\\033[1;m'.format())\n self.from_unix_sec()\n self.from_unix_milli()\n self.from_win_64_hex()\n self.from_win_64_hexle()\n self.from_chrome()\n self.from_ad()\n self.from_unix_hex_32be()\n self.from_unix_hex_32le()\n self.from_cookie()\n self.from_ole_be()\n self.from_ole_le()\n self.from_mac()\n self.from_hfs_dec()\n self.from_hfs_be()\n self.from_hfs_le()\n self.from_msdos()\n self.from_fat()\n self.from_systime()\n self.from_filetime()\n self.from_prtime()\n self.from_ole_auto()\n self.from_ios_time()\n self.from_sym_time()\n self.from_gps_time()\n self.date_output()\n print ('\\r')", "def create_date_list(start_date = start_date, end_date = end_date):", "def arg_parse():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', '--start_date', nargs='?', default=SETTINGS.MIN_START_DATE, \n type=str, help=f'Start date string in format YYYYMMDD, between '\n f'{SETTINGS.MIN_START_DATE} and {SETTINGS.MAX_END_DATE}', metavar='')\n parser.add_argument('-e', '--end_date', nargs='?', default=SETTINGS.MAX_END_DATE,\n type=str, help=f'End date string in format YYYYMMDD, between '\n f'{SETTINGS.MIN_START_DATE} and {SETTINGS.MAX_END_DATE}', metavar='')\n \n return parser.parse_args()", "def load_defaults():\n c = itertools.cycle([4, 5, 6, 7, 1, 2, 3])\n dates = zip(\n [2015] * 90,\n [1] * 31 + [2] * 28 + [3] * 31,\n range(1, 32) + range(1, 29) + range(1, 32),\n [c.next() for _ in range(90)]\n )\n\n months = list(enumerate(\n [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\",\n \"August\", \"September\", \"October\", \"November\", \"December\"], start=1))\n\n session = Session()\n for date in dates:\n date = Date(\n year=date[0],\n month=date[1],\n day=date[2],\n dow=date[3])\n session.add(date)\n\n for month in months:\n date = Month(\n id=month[0],\n name=month[1])\n session.add(date)\n\n session.commit()\n session.close()", "def __init__(__self__, *,\n end: pulumi.Input[str],\n start: pulumi.Input[str]):\n pulumi.set(__self__, \"end\", end)\n pulumi.set(__self__, \"start\", start)", "def __init__(self,\n change_time_range_end_secs=None,\n change_time_range_start_secs=None,\n ):\n\n # Initialize members of the class\n self.change_time_range_end_secs = change_time_range_end_secs\n self.change_time_range_start_secs = change_time_range_start_secs", "def _set_default_args(self):\n self._parser.add_argument(\"username\")\n self._parser.add_argument(\"password\")\n self._parser.add_argument(\n \"--start\",\n help=\"Start date for the scraper in iso format, eg: 2017-11-19\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--end\",\n help=\"End date for the scraper in iso format\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--skip-delete\",\n help=\"Delete the scraper folder in /tmp 
after run\",\n action=\"store_true\",\n )", "def get_time_defaults(game_id: int, start_time: float = None, end_time: float = None):\n game_start, game_end = get_game_start_and_end(game_id)\n if start_time is None:\n start_time = game_start\n\n if end_time is None:\n current_time = time.time()\n end_time = current_time if current_time < game_end else game_end\n\n return start_time, end_time", "def __init__(self, initial_date=None, until_date=None):\n self.initial_date = initial_date\n self.until_date = until_date\n\n log.debug('self.initial_date: {}'.format(self.initial_date))\n log.debug('self.until_date: {}'.format(self.until_date))", "def main():\n print(initialize(['Peter Parker', 'Steve Rogers', 'Tony Stark']))\n print(initialize(\n ['Bruce Wayne', 'Clark Kent', 'Diana Prince'], period=True))", "def fill_missing_date_range():\n pickle_dir ='/misc/yoda/www/plots/user/sheep'\n #pickle_dir = '/Users/ken/Downloads/sheep'\n drange = get_missing_date_range(pickle_dir)\n if drange:\n print 'fill date range', drange\n pickle_date_range(drange[0], drange[1])", "def init_session_values():\n # Default date span = tomorrow to 1 week from now\n now = arrow.now('local') # We really should be using tz from browser\n tomorrow = now.replace(days=+1)\n nextweek = now.replace(days=+7)\n flask.session[\"begin_date\"] = tomorrow.floor('day').isoformat()\n flask.session[\"end_date\"] = nextweek.ceil('day').isoformat()\n flask.session[\"daterange\"] = \"{} - {}\".format(\n tomorrow.format(\"MM/DD/YYYY\"),\n nextweek.format(\"MM/DD/YYYY\"))\n # Default time span each day, 8 to 5\n flask.session[\"begin_time\"] = interpret_time(\"9am\")\n flask.session[\"end_time\"] = interpret_time(\"5pm\")\n flask.session[\"userTimezone\"] = \"America/Los_Angeles\"", "def test_set_project_default_power_schedule(self):\n pass", "def setup_base_settings(\n self,\n number_of_dates_to_be_generated_per_patient,\n dunning_cycle_length,\n possible_invoice_dates,\n customer_ids,\n dob_range,\n cpt_codes,\n distributions,\n ):\n self.number_of_dates_to_be_generated_per_patient = (\n number_of_dates_to_be_generated_per_patient\n )\n self.dunning_cycle_length = dunning_cycle_length\n self.dates = possible_invoice_dates\n self.customerIds = customer_ids\n self.consecutive = check_consecutive(self.customerIds)\n if self.consecutive == False:\n print(\"Error setting up the object- customerIds aren't consecutive\")\n print(self.customerIds)\n self.dobs = pd.date_range(\n start=dob_range[\"start_dob\"],\n end=dob_range[\"end_dob\"],\n periods=len(self.customerIds),\n ).date # range of valid dates of birth for the patients\n self.CPTCodes = cpt_codes # CPT codes to choose from\n self.invoices = [\n 10000\n ] # first invoice id- other invoices are monotonically increasing i.e. 
generated by adding one to the previous invoice.\n\n # dictionary used to define the assumptions used in generating the data set\n self.distributions = distributions\n return True", "def __init__(self, min=0, sec=0):\n self.min = min\n self.sec = sec", "def set_start_time():\n __start = current_time_milli()", "def gen_start_end_times(start_time=[6, 0, 0], end_time=[23, 0, 0]):\n\n now = datetime.now()\n year = now.year\n month = now.month\n day = now.day\n\n start_time = datetime(\n year, month, day, start_time[0], start_time[1], start_time[2], 0\n )\n\n end_time = datetime(year, month, day, end_time[0], end_time[1], end_time[2], 0)\n\n if end_time < now:\n end_time += timedelta(days=1)\n start_time += timedelta(days=1)\n\n return start_time, end_time", "def print_defaults():\n print 'area_bounds :', default_area_bounds\n print 'area_bounds_format :', default_area_bounds_format\n print 'area_bounds_range :', default_area_bounds_range\n print 'years_bounds :', default_years_are_bounds\n print 'dates_are_bounds :', default_dates_are_bounds\n print 'init_date_str_format :', default_init_date_str_format\n print 'member_name :', default_member_name\n print 'period_name :', default_period_name\n print 'initialistion_time_name :', default_initialistion_time_name", "def set_date_range(self, start_date, end_date):\n self._validate_date_range(start_date, end_date)\n self.start_date = pd.Timestamp(start_date)\n self.end_date = pd.Timestamp(end_date)", "def render_range_init():\n\n # Adding/Checking ftrack render range attribute\n defaultRenderGlobals = pm.PyNode(\"defaultRenderGlobals\")\n render_range_set = False\n if hasattr(defaultRenderGlobals, \"ftrackRenderRangeSet\"):\n attr = pm.Attribute(\"defaultRenderGlobals.ftrackRenderRangeSet\")\n render_range_set = attr.get()\n else:\n pm.addAttr(\n defaultRenderGlobals,\n longName=\"ftrackRenderRangeSet\",\n defaultValue=True,\n attributeType=\"bool\"\n )\n\n if not render_range_set:\n\n task = ftrack.Task(os.environ[\"FTRACK_TASKID\"])\n\n startFrame = float(task.getParent().get(\"fstart\"))\n endFrame = float(task.getParent().get(\"fend\"))\n\n handles = float(task.getParent().get(\"handles\"))\n\n mc.warning(\n \"Setting render range to {0} {1} \".format(startFrame, endFrame)\n )\n\n # Add handles to start and end frame\n hsf = startFrame - handles\n hef = endFrame + handles\n\n defaultRenderGlobals.animation.set(True)\n defaultRenderGlobals.animationRange.set(1)\n defaultRenderGlobals.startFrame.set(hsf)\n defaultRenderGlobals.endFrame.set(hef)\n\n # Vray specific resolution\n if pm.objExists(\"vraySettings\"):\n vray_settings = pm.PyNode(\"vraySettings\")\n vray_settings.animType.set(1)", "def __init__(__self__, *,\n start_time: Optional[pulumi.Input[str]] = None):\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)", "def set_range(self, start=None, end=None, occurrences=None):\n if start is None:\n if self.__start_date is None:\n self.__start_date = dt.date.today()\n else:\n self.start_date = start\n\n if end:\n self.end_date = end\n elif occurrences:\n self.__occurrences = occurrences\n self._track_changes()", "def __init__(__self__, *,\n duration_hours: pulumi.Input[int],\n schedule: pulumi.Input['ScheduleArgs'],\n start_time: pulumi.Input[str],\n not_allowed_dates: Optional[pulumi.Input[Sequence[pulumi.Input['DateSpanArgs']]]] = None,\n start_date: Optional[pulumi.Input[str]] = None,\n utc_offset: Optional[pulumi.Input[str]] = None):\n if duration_hours is None:\n duration_hours = 24\n pulumi.set(__self__, 
\"duration_hours\", duration_hours)\n pulumi.set(__self__, \"schedule\", schedule)\n pulumi.set(__self__, \"start_time\", start_time)\n if not_allowed_dates is not None:\n pulumi.set(__self__, \"not_allowed_dates\", not_allowed_dates)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if utc_offset is not None:\n pulumi.set(__self__, \"utc_offset\", utc_offset)", "def default_run_test(data, name, start_trial_id, start):\n ratios = [1/5,1/4, 1/3, 1/2, 1, 2, 3, 4,5]\n start_ = datetime(day=start.day, month=start.month,\n year=start.year, hour=start.hour)\n\n for i in range(start_trial_id, 1 + start_trial_id):\n if start_.weekday() in [0, 6]:\n run_test(data, name=name, interval=10, begin_interval=start_, end_interval=start_ + timedelta(hours=12),\n trial='weekend' + str(i), ratios=ratios)\n else:\n run_test(data, name=name, interval=10, begin_interval=start_, end_interval=start_ + timedelta(hours=12),\n trial='weekday' + str(i), ratios=ratios)\n if i % 2 == 2:\n start_ += timedelta(days=1)", "def __init__(self, start_date_str: str, end_date_str: str):\r\n start_date, end_date = create_date_from_string(start_date_str, end_date_str)\r\n if is_date_valid(start_date, end_date):\r\n self.days_range_array = create_days_range(start_date, end_date)\r\n self.months_range_array = create_months_range(self.days_range_array)\r\n else:\r\n raise Exception", "def _default_dates():\n today = datetime.now().date()\n five_days_from_now = today + timedelta(days=5)\n # create readable format, as should be input\n # return [today.strftime('%Y-%m-%d'), five_days_from_now.strftime('%Y-%m-%d')]\n return [today, five_days_from_now]", "def __init__(self, database_manager=DataBaseManager(), emailer=EmailSender()):\n self.database_manager = database_manager\n self.emailer = emailer\n # Set available timeslots\n self.initial_time_slots = ['09:00:00',\n '10:00:00',\n '11:00:00',\n '12:00:00',\n '13:00:00',\n '14:00:00',\n '15:00:00',\n '16:00:00',\n '17:00:00']", "def __init__(self, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0):\n # Between 0 and 86399 inclusive \n self.seconds = 0\n # Between -999999999 and 999999999 inclusive \n self.days = 0\n # Between 0 and 999999 inclusive \n self.microseconds = 0", "def parse_range(option):\n return {\"range\": timedelta(days=option)}", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def preprocess_dates(args):\n if 'date' in args:\n if args.get('period') == 'range' and 'end_date' in args:\n args['date'] = '{},{}'.format(args['date'],\n args['end_date'])\n return args", "def set_start_time(self, timestamp):\n self.start_day = int(timestamp[8:10])\n hour = int(timestamp[11:13])\n minute = int(timestamp[14:16])\n second = int(timestamp[17:19])\n usecond = float(int(timestamp[21:])) / 1000000\n self.start_time = float(hour * 3600 + minute * 60 + second) + usecond", "def setrange():\n app.logger.debug(\"Entering setrange\")\n daterange = request.form.get('daterange')\n flask.session['daterange'] = daterange\n daterange_parts = daterange.split()\n flask.session['begin_date'] = interpret_date(daterange_parts[0])\n flask.session['end_date'] = interpret_date(daterange_parts[2])\n app.logger.debug(\"Setrange parsed {} - {} dates as {} - {}\".format(\n daterange_parts[0], daterange_parts[1],\n flask.session['begin_date'], flask.session['end_date']))\n startingBound = request.form.get('StartTime')\n endingBound = request.form.get('EndTime')\n 
flask.session['startInput'] = startingBound\n flask.session['endInput'] = endingBound\n\n userTimezone = request.form.get('timezone')\n\n\n\n return flask.redirect(flask.url_for(\"choose\", userTimezone=userTimezone))", "def preprocess_date_and_time(params: Dict) -> None:\n start_date = date.fromisoformat(params[\"start_date\"])\n end_date = date.fromisoformat(params[\"end_date\"])\n\n if end_date < start_date:\n raise Exception(f\"End date is earlier than start date.\")\n \n start_time = time.fromisoformat(params[\"start_time\"])\n end_time = time.fromisoformat(params[\"end_time\"])\n\n if end_time != time.min and end_time <= start_time:\n raise Exception(\"End time is earlier or equal than start time\")\n \n actual_start = time(start_time.hour + 1 if start_time.minute + start_time.second + start_time.microsecond > 0 \n else start_time.hour)\n actual_end = time(end_time.hour)\n\n if actual_end == time.min and end_time != time.min:\n raise Exception(\"Non available blocks to use\")\n \n params.update({\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"start_time\": actual_start,\n \"end_time\": actual_end\n })", "def check_required_range(specific=None, begin=None, end=None):\n\n if not specific and not (begin and end):\n raise ValueError('You must pass some form of date filter')\n\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific dates')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for date range\")", "def __init__(self, start: datetime.date) -> None:\n self.start = start\n self.bill = None", "def handle_default_dates(after: typing.Union[dt.datetime, None],\n before: typing.Union[dt.datetime, None]):\n for date in [after, before]:\n if date and date.tzinfo is None:\n raise ValueError('dates are expected to be tzinfo-aware')\n if not after:\n after = get_year_ago(from_date=before)\n return after, before", "def _init_special_vars(self, T_start=None, T_end=None):\n self.min_energy = np.min(self.event_list_T[1][T_start:T_end])\n self.max_energy = np.max(self.event_list_T[1][T_start:T_end])\n self.min_time = np.min(self.event_list_T[0][T_start:T_end])\n self.max_time = np.max(self.event_list_T[0][T_start:T_end])", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def add_default_options(self, argprs):\n argprs.add_argument(\"-c\", \"--completed\", dest=\"completed\", default=None,\n metavar=\"FILE\",\n help=\"Specify FILE of completed OB keys\")\n argprs.add_argument(\"--date-start\", dest=\"date_start\", default=None,\n help=\"Define the start of the schedule ('YYYY-MM-DD HH:MM')\")\n argprs.add_argument(\"--date-stop\", dest=\"date_stop\", default=None,\n help=\"Define the end of the schedule ('YYYY-MM-DD HH:MM')\")\n argprs.add_argument(\"--display\", dest=\"display\", metavar=\"HOST:N\",\n help=\"Use X display on 
HOST:N\")\n argprs.add_argument(\"-g\", \"--geometry\", dest=\"geometry\",\n metavar=\"GEOM\", default=None,\n help=\"X geometry for initial size and placement\")\n argprs.add_argument(\"-i\", \"--input\", dest=\"input_dir\", default=\".\",\n metavar=\"DIRECTORY\",\n help=\"Read input files from DIRECTORY\")\n argprs.add_argument(\"-f\", \"--format\", dest=\"input_fmt\", default=None,\n metavar=\"FILE_FORMAT\",\n help=\"Specify input file format (csv, xls, or xlsx)\")\n argprs.add_argument(\"--norestore\", dest=\"norestore\", default=False,\n action=\"store_true\",\n help=\"Don't restore the GUI from a saved layout\")\n ## argprs.add_argument(\"--modules\", dest=\"modules\", metavar=\"NAMES\",\n ## help=\"Specify additional modules to load\")\n argprs.add_argument(\"--numthreads\", dest=\"numthreads\", type=int,\n default=30,\n help=\"Start NUM threads in thread pool\", metavar=\"NUM\")\n argprs.add_argument(\"-o\", \"--output\", dest=\"output_dir\", default=None,\n metavar=\"DIRECTORY\",\n help=\"Write output files to DIRECTORY\")\n argprs.add_argument(\"-s\", \"--site\", dest=\"sitename\", metavar=\"NAME\",\n default='subaru',\n help=\"Observing site NAME\")\n argprs.add_argument(\"-t\", \"--toolkit\", dest=\"toolkit\", metavar=\"NAME\",\n default=None,\n help=\"Prefer GUI toolkit (default: choose one)\")\n argprs.add_argument('--version', action='version',\n version='%(prog)s v{version}'.format(version=__version__),\n help=\"Show the qplan version and exit\")\n log.addlogopts(argprs)", "def test_range__no_base_date(self):\n data = self._data()\n data.pop('base_date')\n response = self._get(get_kwargs=data)\n self._check_response(response, 104)", "def run(argparser: ArgumentParser) -> None:\n args = argparser.parse_args()\n\n # Parse datetime args\n if getattr(args, \"dtstart\", None):\n args.dtstart = datetime.strptime(args.dtstart, \"%Y-%m-%d\")\n\n if getattr(args, \"dtend\", None):\n args.dtend = datetime.strptime(args.dtend, \"%Y-%m-%d\").replace(\n hour=23, minute=59, second=59\n )\n\n if getattr(args, \"begin\", None):\n args.begin = datetime.strptime(args.begin, \"%Y-%m-%d\")\n\n # Execute selected function\n if args.func:\n args.func(args)\n else:\n argparser.print_help()", "def __init__(self, orcid, start, stop):\n self.id = orcid.replace(\"-\", \"\")\n self.start = Date(*start.split(\"-\"))\n self.stop = Date(*stop.split(\"-\")) if stop else Date(None, None, None)", "def __init__(self, dt=60*60*24):\n pass", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n end_time: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n program_text: Optional[pulumi.Input[str]] = None,\n start_time: Optional[pulumi.Input[int]] = None,\n time_range: Optional[pulumi.Input[int]] = None,\n url: Optional[pulumi.Input[str]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if end_time is not None:\n pulumi.set(__self__, \"end_time\", end_time)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if program_text is not None:\n pulumi.set(__self__, \"program_text\", program_text)\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)\n if time_range is not None:\n pulumi.set(__self__, \"time_range\", time_range)\n if url is not None:\n pulumi.set(__self__, \"url\", url)", "def __init__(self, start_time, end_time, events, **other_fields):\n self._start_time = start_time\n self._end_time = end_time\n self._events = events", "def main():\n # dates lists for testing\n 
dates = [\n datetime.date(2010, 1, 15),\n datetime.date(2012, 6, 29)\n ]\n targets = [\n datetime.date(2000, 1, 1),\n datetime.date(2016, 10, 3)\n ]\n #loop through cases\n for d in dates:\n for t in targets:\n #calculate differences\n dayDiff = diffDates(d, t, \"days\")\n monthDiff = diffDates(d, t, \"months\")\n yearDiff = diffDates(d, t, \"years\")\n #create dictionary for printing\n vals = {\"day\":dayDiff, \"month\":monthDiff, \"year\":yearDiff}\n #print out values\n for period in vals:\n diff = vals[period]\n period = str(period) + (\"s\" if diff != 1 else \"\")\n print \"There are {0} {1} between {2} and {3}\".format(diff, period, t, d)", "def fill_testing_dates(self):\r\n \r\n now = datetime.now()\r\n month = now.strftime('%m')\r\n year = now.year \r\n most_recent_date = '{}-{}-01'.format(year, month)\r\n self.testing_dates[1] = {'cv_start': '1972-01-01', \r\n 'cv_end': '1975-12-01', \r\n 'pred_start': '1976-01-01',\r\n 'pred_end': '1981-07-01'}\r\n self.testing_dates[2] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1981-07-01', \r\n 'pred_start': '1981-08-01',\r\n 'pred_end': '1983-07-01'}\r\n self.testing_dates[3] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1983-07-01', \r\n 'pred_start': '1983-08-01',\r\n 'pred_end': '1992-12-01'}\r\n self.testing_dates[4] = {'cv_start': '1983-08-01', \r\n 'cv_end': '1992-12-01', \r\n 'pred_start': '1993-01-01',\r\n 'pred_end': '2003-07-01'}\r\n self.testing_dates[5] = {'cv_start': '1993-01-01', \r\n 'cv_end': '2003-07-01', \r\n 'pred_start': '2003-08-01',\r\n 'pred_end': '2010-09-01'}\r\n self.testing_dates[6] = {'cv_start': '2003-08-01', \r\n 'cv_end': '2010-09-01', \r\n 'pred_start': '2010-10-01',\r\n 'pred_end': '2021-07-01'}\r\n self.testing_dates[7] = {'cv_start': '2010-10-01', \r\n 'cv_end': '2021-07-01', \r\n 'pred_start': '2021-08-01',\r\n 'pred_end': most_recent_date}", "def test_date_range(self):\n\n url = '/%s/job-types/status/?started=%s&ended=%s' % ( self.api,\n '2015-01-01T00:00:00Z',\n '2015-01-02T00:00:00Z')\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)", "def __init__(__self__, *,\n program_text: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n end_time: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n start_time: Optional[pulumi.Input[int]] = None,\n time_range: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"program_text\", program_text)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if end_time is not None:\n pulumi.set(__self__, \"end_time\", end_time)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)\n if time_range is not None:\n pulumi.set(__self__, \"time_range\", time_range)", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. 
%s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def __init__(self, stop=None, days: float = 14, no_exclude: bool = False):\n self.stop = CxoTime(stop)\n self.days = days\n self.start: CxoTime = self.stop - days * u.day\n self.no_exclude = no_exclude", "def _get_start_date(self):\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(\n today.year,\n ((quarter - 1) * 3) + 1,\n 1\n )\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, \"%Y-%m-%d\").date()\n except Exception as e:\n raise ParseError(\"start argument not valid\")\n\n self.start_date = start_date", "def test_loans_default_durations(testdata):\n tomorrow = arrow.utcnow() + timedelta(days=1)\n not_overdue_end_date = tomorrow.date().isoformat()\n for duration_func in (\n circulation_default_loan_duration,\n circulation_default_extension_duration,\n ):\n FAKE_LOAN_ITEM_NO_RESTRICTIONS[\"end_date\"] = not_overdue_end_date\n assert duration_func(FAKE_LOAN_ITEM_NO_RESTRICTIONS, None) == timedelta(weeks=4)\n\n FAKE_LOAN_ITEM_ONE_WEEK[\"end_date\"] = not_overdue_end_date\n assert duration_func(FAKE_LOAN_ITEM_ONE_WEEK, None) == timedelta(weeks=1)\n\n FAKE_LOAN_ITEM_TWO_WEEKS[\"end_date\"] = not_overdue_end_date\n assert duration_func(FAKE_LOAN_ITEM_TWO_WEEKS, None) == timedelta(weeks=2)\n\n FAKE_LOAN_ITEM_THREE_WEEKS[\"end_date\"] = not_overdue_end_date\n assert duration_func(FAKE_LOAN_ITEM_THREE_WEEKS, None) == timedelta(weeks=3)", "def test_parse_valid_time_range(self):\n from azure.servicefabric.models.time_range import (\n TimeRange\n )\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_time_range({\n 'StartTime': {\n 'Hour': 0,\n 'Minute': 0\n },\n 'EndTime': {\n 'Hour': 23,\n 'Minute': 59,\n }\n })\n\n self.assertIsInstance(res, TimeRange)\n\n self.assertIsInstance(res.start_time, TimeOfDay)\n self.assertEqual(res.start_time.hour, 0)\n self.assertEqual(res.start_time.minute, 0)\n\n self.assertIsInstance(res.end_time, TimeOfDay)\n self.assertEqual(res.end_time.hour, 23)\n self.assertEqual(res.end_time.minute, 59)", "def __init__(self, begin, end):\n begin = datetime(begin.year, begin.month, begin.day)\n self.begin_ts = int((begin-datetime(1970,1,1)).total_seconds())\n end = datetime(end.year, end.month, end.day)\n self.end_ts = int((end-datetime(1970,1,1)).total_seconds())", "def test_range_query(self):\r\n start = datetime(*self.base_date.timetuple()[:3])\r\n end = start + timedelta(days=3)\r\n\r\n results = DateTimeQueryTestModel.filter(user=0, day__gte=start, day__lt=end)\r\n assert len(results) == 3", "def test_create_one_start(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 10002, 12001, {\"from\": accounts[2]})\n check_ranges([(1, 10001)], [(10001, 10002), (12001, 20001)], [(20001, 30001)], [(10002, 12001)])", "def __init__(self, dateStart, dateEnd): \n #TODO: Raise an exception if dateEnd<dateStart.\n 
super(dateGenerator,self).__init__()\n d = dateEnd - dateStart\n self._startDate = dateStart\n self._dateDiffSeconds = d.days * 86400 + d.seconds", "def test_all(self):\n\n # year = 1980 #unused\n date = datetime.date(1980, 1, 1)\n while date < datetime.date(1981, 1, 1):\n if date.month <= 4:\n mindate, maxdate = datetime.date(1980, 1, 1), datetime.date(1980, 4, 30)\n elif date.month <= 8:\n mindate, maxdate = datetime.date(1980, 5, 1), datetime.date(1980, 8, 31)\n else:\n mindate, maxdate = datetime.date(1980, 9, 1), datetime.date(1980, 12, 31)\n\n startdate, enddate = get_tertialspan(date)\n self.assertTrue(startdate >= mindate)\n self.assertTrue(startdate <= maxdate)\n self.assertTrue(enddate >= mindate)\n self.assertTrue(enddate <= maxdate)\n\n date += datetime.timedelta(days=1)", "def check_export_start_date(export_start_dates, export_end_dates,\n export_day_range):\n for test_type in TEST_TYPES:\n if export_start_dates[test_type] == \"\":\n export_start_dates[test_type] = datetime(2020, 5, 26)\n else:\n export_start_dates[test_type] = datetime.strptime(\n export_start_dates[test_type], '%Y-%m-%d')\n # Only export data from -45 days to -5 days\n export_start_dates[test_type] = compare_dates(\n export_end_dates[test_type] - timedelta(days=export_day_range),\n export_start_dates[test_type], \"l\")\n if test_type == \"covid_ag\":\n export_start_dates[test_type] = compare_dates(\n export_start_dates[test_type], datetime(2020, 5, 26), \"l\")\n return export_start_dates", "def change_default_range(networks, number_excluded_ips,\n cut_from_start=True):\n for default_network in filter(\n lambda x: ((x['name'] != 'fuelweb_admin')and\n (x['name'] != 'private')),\n networks):\n default_range = [netaddr.IPAddress(str(ip)) for ip\n in default_network[\"ip_ranges\"][0]]\n if cut_from_start:\n new_range = [default_range[0],\n default_range[0] + number_excluded_ips]\n else:\n new_range = [default_range[0] + number_excluded_ips + 1,\n default_range[1]]\n default_network[\"ip_ranges\"][0] = [str(ip)\n for ip in new_range]", "def test_dates_must_be_in_order_happy(make_one):\n w = make_one(\"TEST-1\")\n w.queued_at = datetime.datetime.now() - datetime.timedelta(days=14)\n w.started_at = datetime.datetime.now() - datetime.timedelta(days=4)\n w.ended_at = datetime.datetime.now()\n\n w.check_dates()", "def test_create_start_using_datetime(self):\n jan = datetime.datetime(year=2012, month=12, day=1, hour=12, minute=12, second=23)\n t = self.create_request_object(dataset_type=\"raw\", formoid=\"DM\", start=jan)\n self.assertEqual(\"Mediflex\", t.project_name)\n self.assertEqual(\"Prod\", t.environment_name)\n self.assertEqual(\"studies/Mediflex(Prod)/versions/1001/datasets/raw/DM?start=2012-12-01T12%3A12%3A23\", t.url_path())", "def __init__(self, start_time: datetime=None, end_time: datetime=None, rates: List[Rates]=None):\n self.openapi_types = {\n 'start_time': datetime,\n 'end_time': datetime,\n 'rates': List[Rates]\n }\n\n self.attribute_map = {\n 'start_time': 'startTime',\n 'end_time': 'endTime',\n 'rates': 'rates'\n }\n\n self._start_time = start_time\n self._end_time = end_time\n self._rates = rates", "def default_start(self, data):\n return {}", "def default_start(self, data):\n return {}", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n end_time: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n program_text: Optional[pulumi.Input[str]] = None,\n start_time: 
Optional[pulumi.Input[int]] = None,\n time_range: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def __init__(self, rate, from_weekday, to_weekday, from_hour, to_hour):\n self.from_weekday = from_weekday\n self.to_weekday = to_weekday\n self.from_hour = from_hour\n self.to_hour = to_hour\n self.rate = rate", "def time_range(start, *, periods=50, spacing=None, end=None, format=None, scale=None):\n start = Time(start, format=format, scale=scale)\n\n if spacing is not None and end is None:\n result = start + spacing * np.arange(0, periods)\n\n elif end is not None and spacing is None:\n end = Time(end, format=format, scale=scale)\n result = start + (end - start) * np.linspace(0, 1, periods)\n\n else:\n raise ValueError(\"Either 'end' or 'spacing' must be specified\")\n\n return result", "def test_create_start_using_datetime(self):\n jan = datetime.datetime(year=2012, month=12, day=1, hour=12, minute=12, second=23)\n t = self.create_request_object(dataset_type=\"raw\", formoid=\"DM\", start=jan)\n self.assertEqual(\"Mediflex\", t.project_name)\n self.assertEqual(\"Prod\", t.environment_name)\n self.assertEqual(\"studies/Mediflex(Prod)/datasets/raw/DM?start=2012-12-01T12%3A12%3A23\", t.url_path())", "def __init__(self, ticker, start, *args):\n self.ticker = ticker.upper()\n self.interval = \"1d\"\n self.cookie, self.crumb = self.init()\n\n self.start = int(time.mktime(dt.datetime(start[0],start[1],start[2]).timetuple()))\n\n if args:\n end = args[0]\n self.end = int(time.mktime(dt.datetime(end[0],end[1],end[2]).timetuple()))\n else:\n self.end = int(time.time())\n\n self.url = self.api_url % (self.ticker, self.start, self.end, self.interval, self.crumb)", "def _setupDowntime(self, s, config):\n snames = ('schedDowntime', 'unschedDowntime')\n if s not in snames:\n raise Exception('t should be one of %s' %(snames))\n filename = config['%s_datafile' %(s)]\n file = open(filename, 'r')\n # Read the data file.\n # Assume that downtime conf files are formatted as a series of\n # entries, giving startNight & duration for each downtime period.\n print '# Reading downtime data file %s' %(filename)\n startdates = []\n durations = []\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n values = line.split()\n if len(values)>0:\n if values[0] == 'startNight':\n startdates.append(int(values[2]))\n if values[0] == 'duration':\n durations.append(int(values[2])) \n file.close()\n # Translate startNight & duration into a list of dates covering all downtime in the survey.\n self.downdates[s] = []\n for start, dur in zip(startdates, durations):\n for i in range(0, dur):\n self.downdates[s].append(start+i)\n self.downdates[s] = numpy.array(self.downdates[s], int)\n #print self.downdates[s]\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d downtime nights from %s file. 
' %(len(self.downdates[s]), filename)", "def __parse_init_time(args):\n if args.init_time is None:\n return\n try:\n if args.init_time.isdigit():\n args.init_time=int(args.init_time)\n else:\n args.init_time=datetime.strptime(args.init_time, __DATE_FORMAT)\n except Exception as ex:\n error_exit(str(ex))", "def __init__(self, start_time=None):\n if start_time is None:\n self.started = time.time()\n else:\n self.started = start_time", "def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):", "def default (no_flow = False,\n network = \"192.168.0.0/24\", # Address range\n first = 100, last = 199, count = None, # Address range\n ip = \"192.168.0.254\",\n router = (), # Auto\n dns = ()): # Auto\n launch(no_flow, network, first, last, count, ip, router, dns)", "def main():\n ## The standard way to get arguments from the command line, \n ## make sure they are the right type, and print help messages\n parser = argparse.ArgumentParser(description=\"Compute days from yyyy-mm-dd to next mm-dd.\")\n parser.add_argument('year', type=int, help=\"Start year, between 1800 and 2500\")\n parser.add_argument('start_month', type=int, help=\"Starting month, integer 1..12\")\n parser.add_argument('start_day', type=int, help=\"Starting day, integer 1..31\")\n parser.add_argument('end_month', type=int, help=\"Ending month, integer 1..12\")\n parser.add_argument('end_day', type=int, help=\"Ending day, integer 1..12\")\n args = parser.parse_args() # will get arguments from command line and validate them\n year = args.year\n start_month = args.start_month\n start_day = args.start_day\n end_month = args.end_month\n end_day = args.end_day\n \n print(\"Checking date \", str(year) + \"/\" + str(start_month) + \"/\" + str(start_day))\n \n\n if not is_valid(year, start_month, start_day) : \n sys.exit(\"Must start on a valid date between 1800 and 2500\")\n if not is_valid(2000, end_month, end_day):\n sys.exit(\"Ending month and day must be part of a valid date\")\n count_days(year,start_month,start_day,end_month,end_day)", "def setup( self ):\n super( TimeGraph, self ).setup()\n\n if 'span' in self.metadata and isinstance(self.metadata['span'], \\\n types.StringType):\n self.metadata['span'] = float(self.metadata['span'])\n\n vars = dict(self.vars)\n\n do_croptime = str(find_info('croptime', self.metadata, self.kw,False)).\\\n lower().find('t') >= 0\n if do_croptime:\n begin = numpy.inf; end = 0\n for pivot, groups in self.parsed_data.items():\n for timebin, data in groups.items():\n begin = min( to_timestamp(timebin), begin )\n end = max( to_timestamp(timebin), end )\n end += self.metadata.get('span', 0)\n else:\n begin = to_timestamp(find_info( self.starttime_str, vars,\n self.metadata, time.time()-24*3600))\n end = to_timestamp(find_info(self.endtime_str,vars, self.metadata,\n time.time()))\n\n self.begin = begin; self.end = end\n self.begin_datetime = datetime.datetime.utcfromtimestamp( float(begin) )\n self.end_datetime = datetime.datetime.utcfromtimestamp( float(end) )\n self.begin_num = date2num( self.begin_datetime )\n self.end_num = date2num( self.end_datetime )\n\n self.width = int(find_info('span', vars, self.metadata, self.time_interval() ))\n\n title = getattr( self, 'title', '' )\n self.title = self.add_time_to_title( title )", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n return", 
"def _start(args=None):\n options = _parse_args(args)\n main(**options)", "def test_parse_valid_active_time_ranges(self):\n from azure.servicefabric.models.time_range import (\n TimeRange\n )\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_active_time_ranges(\n [\n {\n 'StartTime': {\n 'Hour': 0,\n 'Minute': 0\n },\n 'EndTime': {\n 'Hour': 12,\n 'Minute': 0,\n }\n },\n {\n 'StartTime': {\n 'Hour': 12,\n 'Minute': 0\n },\n 'EndTime': {\n 'Hour': 23,\n 'Minute': 59,\n }\n }\n ]\n )\n\n self.assertIsInstance(res, list)\n self.assertEqual(len(res), 2)\n\n self.assertIsInstance(res[0], TimeRange)\n self.assertIsInstance(res[0].start_time, TimeOfDay)\n self.assertEqual(res[0].start_time.hour, 0)\n self.assertEqual(res[0].start_time.minute, 0)\n\n self.assertIsInstance(res[0].end_time, TimeOfDay)\n self.assertEqual(res[0].end_time.hour, 12)\n self.assertEqual(res[0].end_time.minute, 0)\n\n self.assertIsInstance(res[1], TimeRange)\n self.assertIsInstance(res[1].start_time, TimeOfDay)\n self.assertEqual(res[1].start_time.hour, 12)\n self.assertEqual(res[1].start_time.minute, 0)\n\n self.assertIsInstance(res[1].end_time, TimeOfDay)\n self.assertEqual(res[1].end_time.hour, 23)\n self.assertEqual(res[1].end_time.minute, 59)", "def createJoinedCal(rangeLimit1, rangeLimit2):\n\n # find the latest start time and convert it to minutes\n start = max(CTM(rangeLimit1[0]), CTM(rangeLimit2[0]))\n # find the earliest stop time and convert it to minutes\n end = min(CTM(rangeLimit1[1]), CTM(rangeLimit2[1]))\n\n # create a dict containing all minutes between start and end indicating available minutes during the day\n # this is the default without considering meetings\n available = {}\n for i in range(start, end + 1):\n available[i] = True\n return available", "def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_start_time(self, *args, **kwargs)", "def _validate(self):\n self.params['report date'] = None\n if any(self.params.values()):\n s = self.params['start']\n e = self.params['end']\n cond1 = s is None\n cond2 = e is None\n \n if cond1 and not cond2:\n self.params['report date'] = e\n if not cond1 and cond2:\n self.params['report date'] = s\n if not cond1 and not cond2:\n if s == e:\n self.params['report date'] = s\n else:\n if s > e:\n self.params['start'] = e\n self.params['end'] = s\n else:\n self.params['report date'] = MAX_DATE", "def test_create_start_using_datetime(self):\n jan = datetime.datetime(year=2012, month=12, day=1, hour=12, minute=12, second=23)\n t = self.create_request_object(dataset_type=\"raw\", formoid=\"DM\", start=jan)\n self.assertEqual(\"Mediflex\", t.project_name)\n self.assertEqual(\"Prod\", t.environment_name)\n self.assertEqual(\"studies/Mediflex(Prod)/subjects/1001/datasets/raw/DM?start=2012-12-01T12%3A12%3A23\", t.url_path())", "def __init__( self, *args, **kw ):\n self.starttime_str = 'starttime'\n self.endtime_str = 'endtime'\n self.is_timestamps = True\n self.resize_time_graph = True\n super( TimeGraph, self ).__init__( *args, **kw )", "def __init__(self, startdate, enddate, dateformat, attendees):\n\t\tself.startdate = startdate\n\t\tself.enddate = enddate\n\t\tself.dateformat = re.compile(dateformat)\n\t\tself.attendees = attendees", "def set_start_date(self, date):\n pass", "def __init__(self, date_: date, length: int,\n days_off: Optional[List[date]] = None, workdays: Optional[List[date]] = None,\n workday_style: Optional[Style] = None, day_off_style: Optional[Style] = None):\n self.day_off_style = 
day_off_style or Style()\n self.workday_style = workday_style or Style()\n\n self.date = date_\n self.length = length\n\n self.workdays = workdays or []\n self.days_off = days_off or []" ]
[ "0.6126687", "0.6049868", "0.5839494", "0.57524276", "0.5737345", "0.57099867", "0.5672743", "0.5655215", "0.560356", "0.5569601", "0.55678964", "0.55200195", "0.55126786", "0.549733", "0.54575974", "0.540262", "0.5393907", "0.5381829", "0.5373057", "0.53664577", "0.5354388", "0.532774", "0.53259605", "0.5319823", "0.52697456", "0.5265264", "0.526301", "0.52604526", "0.52441686", "0.5235324", "0.5230508", "0.5200992", "0.5192612", "0.5190965", "0.5168951", "0.515806", "0.51551545", "0.5150628", "0.5140701", "0.5140066", "0.51197994", "0.51121503", "0.5073003", "0.506926", "0.5062936", "0.50489724", "0.5048221", "0.5048221", "0.50336355", "0.5032833", "0.5028234", "0.50182563", "0.5015883", "0.50110227", "0.49902576", "0.4984386", "0.4977395", "0.49772987", "0.49771675", "0.4976812", "0.49720883", "0.49624887", "0.49621776", "0.49583775", "0.4952959", "0.4951427", "0.49446318", "0.49444577", "0.4942571", "0.49322328", "0.4924899", "0.49215788", "0.49004745", "0.48939407", "0.48936", "0.48930386", "0.48930386", "0.4889719", "0.4885895", "0.4882228", "0.48809874", "0.48799756", "0.48789254", "0.48778927", "0.48762044", "0.48749506", "0.4869079", "0.48641112", "0.48613015", "0.4860474", "0.48556307", "0.4851245", "0.48500857", "0.48487002", "0.4847844", "0.48458663", "0.48454294", "0.48414975", "0.48401776", "0.48366585" ]
0.5581906
9
Read time in a human-compatible format and interpret as ISO format with local timezone. May throw an exception if the time can't be interpreted. In that case it will also flash a message explaining the accepted formats.
def interpret_time( text ): app.logger.debug("Decoding time '{}'".format(text)) time_formats = ["ha", "h:mma", "h:mm a", "H:mm"] try: as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal()) as_arrow = as_arrow.replace(year=2016) #HACK see below app.logger.debug("Succeeded interpreting time") except: app.logger.debug("Failed to interpret time") flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm" .format(text)) raise return as_arrow.isoformat() #HACK #Workaround # isoformat() on raspberry Pi does not work for some dates # far from now. It will fail with an overflow from time stamp out # of range while checking for daylight savings time. Workaround is # to force the date-time combination into the year 2016, which seems to # get the timestamp into a reasonable range. This workaround should be # removed when Arrow or Dateutil.tz is fixed. # FIXME: Remove the workaround when arrow is fixed (but only after testing # on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
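A minimal, self-contained sketch (illustration only, not part of the dataset record above) of how a parser like the one shown can be exercised with Arrow. The helper name to_local_iso, the sample inputs, and the pinned year are assumptions for demonstration, not taken from the source.

import arrow
from dateutil import tz

def to_local_iso(text, pinned_year=2016):
    # Try a few common clock-time formats; arrow raises a parser error
    # if none of them match the input string.
    time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
    parsed = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
    # Pin the year so the timestamp stays in a range that is safe to
    # convert on 32-bit platforms (mirrors the workaround noted above).
    return parsed.replace(year=pinned_year).isoformat()

if __name__ == "__main__":
    for sample in ["1:30pm", "13:30", "9am"]:
        print(sample, "->", to_local_iso(sample))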
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpret_time(text):\n app.logger.debug(\"Decoding time '{}'\".format(text))\n time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n try:\n as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n as_arrow = as_arrow.replace(year=2016) # HACK see below\n app.logger.debug(\"Succeeded interpreting time\")\n except:\n app.logger.debug(\"Failed to interpret time\")\n flask.flash(\"Time '{}' didn't match accepted formats 13:30 or 1:30pm\"\n .format(text))\n raise\n return as_arrow.isoformat()\n # HACK Workaround\n # isoformat() on raspberry Pi does not work for some dates\n # far from now. It will fail with an overflow from time stamp out\n # of range while checking for daylight savings time. Workaround is\n # to force the date-time combination into the year 2016, which seems to\n # get the timestamp into a reasonable range. This workaround should be\n # removed when Arrow or Dateutil.tz is fixed.\n # FIXME: Remove the workaround when arrow is fixed (but only after testing\n # on rasp Pi failure is likely due to 32-bit integers on that platform)", "def test_parseTimeInvalidFormat(self):\n self.assertRaises(ValueError, imap4.parseTime, u\"invalid\")", "def _parse_time(time_string: str, source: str = \"input\") -> Optional[datetime.datetime]:\n if not time_string:\n return None\n\n format_string = \"%Y-%m-%d\" if source == \"input\" else \"%Y-%m-%dT%H:%M:%SZ\"\n try:\n return datetime.datetime.strptime(time_string, format_string)\n except ValueError:\n raise AnalyzerError(\"Incorrect date format\")", "def _CopyTimeFromStringISO8601(self, time_string):\n if time_string.endswith('Z'):\n time_string = time_string[:-1]\n\n time_string_length = len(time_string)\n\n # The time string should at least contain 'hh'.\n if time_string_length < 2:\n raise ValueError('Time string too short.')\n\n try:\n hours = int(time_string[0:2], 10)\n except ValueError:\n raise ValueError('Unable to parse hours.')\n\n if hours not in range(0, 24):\n raise ValueError('Hours value: {0:d} out of bounds.'.format(hours))\n\n minutes = None\n seconds = None\n microseconds = None\n time_zone_offset = None\n\n time_string_index = 2\n\n # Minutes are either specified as 'hhmm', 'hh:mm' or as a fractional part\n # 'hh[.,]###'.\n if (time_string_index + 1 < time_string_length and\n time_string[time_string_index] not in ('.', ',')):\n if time_string[time_string_index] == ':':\n time_string_index += 1\n\n if time_string_index + 2 > time_string_length:\n raise ValueError('Time string too short.')\n\n try:\n minutes = time_string[time_string_index:time_string_index + 2]\n minutes = int(minutes, 10)\n except ValueError:\n raise ValueError('Unable to parse minutes.')\n\n time_string_index += 2\n\n # Seconds are either specified as 'hhmmss', 'hh:mm:ss' or as a fractional\n # part 'hh:mm[.,]###' or 'hhmm[.,]###'.\n if (time_string_index + 1 < time_string_length and\n time_string[time_string_index] not in ('.', ',')):\n if time_string[time_string_index] == ':':\n time_string_index += 1\n\n if time_string_index + 2 > time_string_length:\n raise ValueError('Time string too short.')\n\n try:\n seconds = time_string[time_string_index:time_string_index + 2]\n seconds = int(seconds, 10)\n except ValueError:\n raise ValueError('Unable to parse day of seconds.')\n\n time_string_index += 2\n\n time_zone_string_index = time_string_index\n while time_zone_string_index < time_string_length:\n if time_string[time_zone_string_index] in ('+', '-'):\n break\n\n time_zone_string_index += 1\n\n # The calculations that follow rely 
on the time zone string index\n # to point beyond the string in case no time zone offset was defined.\n if time_zone_string_index == time_string_length - 1:\n time_zone_string_index += 1\n\n if (time_string_length > time_string_index and\n time_string[time_string_index] in ('.', ',')):\n time_string_index += 1\n time_fraction_length = time_zone_string_index - time_string_index\n\n try:\n time_fraction = time_string[time_string_index:time_zone_string_index]\n time_fraction = int(time_fraction, 10)\n time_fraction = (\n decimal.Decimal(time_fraction) /\n decimal.Decimal(10 ** time_fraction_length))\n except ValueError:\n raise ValueError('Unable to parse time fraction.')\n\n if minutes is None:\n time_fraction *= 60\n minutes = int(time_fraction)\n time_fraction -= minutes\n\n if seconds is None:\n time_fraction *= 60\n seconds = int(time_fraction)\n time_fraction -= seconds\n\n time_fraction *= definitions.MICROSECONDS_PER_SECOND\n microseconds = int(time_fraction)\n\n if minutes is not None and minutes not in range(0, 60):\n raise ValueError('Minutes value: {0:d} out of bounds.'.format(minutes))\n\n # TODO: support a leap second?\n if seconds is not None and seconds not in range(0, 60):\n raise ValueError('Seconds value: {0:d} out of bounds.'.format(seconds))\n\n if time_zone_string_index < time_string_length:\n if (time_string_length - time_zone_string_index != 6 or\n time_string[time_zone_string_index + 3] != ':'):\n raise ValueError('Invalid time string.')\n\n try:\n hours_from_utc = int(time_string[\n time_zone_string_index + 1:time_zone_string_index + 3])\n except ValueError:\n raise ValueError('Unable to parse time zone hours offset.')\n\n if hours_from_utc not in range(0, 15):\n raise ValueError('Time zone hours offset value out of bounds.')\n\n try:\n minutes_from_utc = int(time_string[\n time_zone_string_index + 4:time_zone_string_index + 6])\n except ValueError:\n raise ValueError('Unable to parse time zone minutes offset.')\n\n if minutes_from_utc not in range(0, 60):\n raise ValueError('Time zone minutes offset value out of bounds.')\n\n # pylint: disable=invalid-unary-operand-type\n time_zone_offset = (hours_from_utc * 60) + minutes_from_utc\n\n if time_string[time_zone_string_index] == '-':\n time_zone_offset = -time_zone_offset\n\n return hours, minutes, seconds, microseconds, time_zone_offset", "def parseTime(string):\t\n \n if string == \"\":\n result = None\n if 'T' in string:\n string = string.replace('T', ' ')\n if 'Z' in string:\n string = string.replace('Z', '') \n\n if len(string) < 19:\n # string has some single digits\n p = \"\"\"^([0-9]{4})-([0-9]{1,2})-([0-9]{1,2}) \n ([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2}).*$\"\"\"\n s = re.findall(p, string)\n if len(s) > 0:\n string = '{0}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'\\\n .format(*[int(x) for x in s[0]])\n\n for date_format in DATE_FORMATS:\n try:\n result = datetime.datetime.strptime(string, date_format)\n except ValueError:\n pass\n\n return result", "def parse_time(text):\n try:\n if len(text) == 17:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%MZ')\n elif len(text) == 20:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%M:%SZ')\n else:\n date = datetime.datetime.utcnow()\n except Exception as _:\n date = datetime.datetime.utcnow()\n return date", "def test_parse_time_with_invalid_absolute_datetime(self):\n self.assert_TPVE(parse_time, \"\", None)\n self.assert_TPVE(parse_time, \"blahblah\", None)\n # This is detected as a YYYYMMDD string, but it's invalid.\n self.assert_TPVE(parse_time, 
\"20150231\", None)\n\n # Graphite accepts the following, we don't.\n self.assert_TPVE(parse_time, \"2015_02_01\", None)\n self.assert_TPVE(parse_time, \"12:35 20150201\", None)\n self.assert_TPVE(parse_time, \"12:3520150201\", None)\n self.assert_TPVE(parse_time, \"12/31/99\", None)\n self.assert_TPVE(parse_time, \"6pm today\", None)\n self.assert_TPVE(parse_time, \"noon tomorrow\", None)\n self.assert_TPVE(parse_time, \"january 1\", None)\n self.assert_TPVE(parse_time, \"monday\", None)", "def validate_and_parse_input(time: str):\n if time is None or not re.match(r'^\\d{1,2}:\\d{1,2}$', time):\n return False\n hour, minute = map(int, time.split(r':'))\n if type(hour) != int or type(minute) != int:\n return False\n\n if 0 <= hour < 24 and 0 <= minute < 60:\n hour = hour % 12\n minute = minute\n return hour, minute\n else:\n return False", "def _parse_time_str(self, time_str):\n time_fmt = \"%I:%M%p\"\n time_str = re.sub(\n r\":+\",\n \":\",\n re.sub(r\"\\s+\", \"\", re.sub(r\"to|from|\\.\", \"\", time_str.lower())).replace(\n \"o\", \"0\"\n ),\n )\n if \":\" not in time_str:\n time_fmt = \"%I%p\"\n elif len(time_str) < 6:\n time_fmt = \"%I%p\"\n time_str = time_str.replace(\":\", \"\")\n return datetime.strptime(time_str, time_fmt).time()", "def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()", "def parse_time(time: Union[str, datetime]) -> datetime:\n if isinstance(time, str):\n try:\n from ciso8601 import parse_datetime # pylint: disable=wrong-import-position # noqa: F401\n return parse_datetime(time)\n except (ImportError, ValueError): # pragma: no cover\n return dateutil.parser.parse(time)\n\n return time", "def parse_time(time_input, *, force_datetime=False, allow_undefined=False, **kwargs):\n\n if allow_undefined and time_input in [None, '..']:\n return None\n\n if isinstance(time_input, dt.date):\n if force_datetime and not isinstance(time_input, dt.datetime):\n return date_to_datetime(time_input)\n\n if kwargs.get('ignoretz') and isinstance(time_input, dt.datetime):\n return time_input.replace(tzinfo=None)\n\n return time_input\n\n time = dateutil.parser.parse(time_input, **kwargs)\n if force_datetime or len(time_input) > 10: # This check is not very accurate but it works for iso format\n return time\n return time.date()", "def parse_isotime(timestr):\r\n try:\r\n return iso8601.parse_date(timestr)\r\n except iso8601.ParseError as e:\r\n raise ValueError(unicode(e))\r\n except TypeError as e:\r\n raise ValueError(unicode(e))", "def _change_time_format(time_string):\n datetime_object = parser.isoparse(time_string)\n return datetime_object", "def properTimeInput(time_):\r\n if not time_.isdigit() or len(time_) > 4 or len(time_) < 4 or int(time_) > 2400 or int(time_) < 0 or int(time_[2])>5:\r\n print(\"'\",time_, \"' is an invalid input for the time. 
Use 24 hr format.\\nExamples: 8 a.m = 0800, 1 p.m = 1300, 2:30 = 1430, 12:50 a.m = 0050\\n\")\r\n return False\r\n return True", "def __parse_time(self, time_obj):\n if time_obj:\n resp = ''\n if isinstance(time_obj, int) or isinstance(time_obj, str):\n resp = time_obj\n elif isinstance(time_obj, datetime.datetime):\n resp = calendar.timegm(time_obj.timetuple())\n else:\n raise Exception(\"Unknown __parse_time format for {0}\".format(time_obj))\n return str(resp)\n return None", "def test_parse_no_timezine_strict():\n iso8601.parse_datetime(\"2007-01-01T08:00:00\")", "def try_parsing_date(text):\n for fmt in ('%I %p', '%I %M %p', '%I:%M %p'):\n try:\n return datetime.datetime.strptime(text, fmt)\n except ValueError:pass\n if \":\" in text:\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text.split(\":\")[0])>=8 else \"PM\"), '%I:%M %p')\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text)>=8 else \"PM\"), '%I %p')", "def read_time(time_string):\n factors = {\n \"n\": 1e-9,\n \"u\": 1e-6,\n \"m\": 1e-3,\n \"s\": 1\n }\n \n # Check that the time string is properly formatted, e. g. time part\n # is followed by the unit part. The string should contain at least two\n # character, otherwise splitting it into two parts will raise an IndexError.\n try:\n number, unit = time_string[:-1], time_string[-1]\n except (IndexError, TypeError):\n raise ValueError(\"Invalid time string given.\")\n\n # If the 'time part' cannot be converted to float, this raises a ValueError.\n number = float(number)\n \n if number < 0:\n raise ValueError(\"Negative time values are not allowed.\")\n \n # Check that a valid time unit was specified. If no unit was specified,\n # then what we call 'unit' will in fact be the last digit of the time value\n # and as we do not use numeric unit symbols, we still get an error.\n try:\n factor = factors[unit]\n except KeyError:\n raise ValueError(\"Invalid time unit given.\")\n\n time = number * factor\n return time", "def test_parse_time(\n test_input: str,\n expected: datetime.time,\n):\n assert tvmaze.parsers.parse_time(test_input) == expected", "def fromisoformat(string):\n string = string.replace(\"T\", \" \")\n if \".\" in string:\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S.%f\")\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S\")", "def _get_date(str_time, time_formats = [\"%Y-%m-%d %H:%M:%S.%f\", \"%Y-%m-%d %H:%M:%S\"]):\r\n time = None\r\n for time_format in time_formats:\r\n try:\r\n time = datetime.strptime(str_time, time_format)\r\n if time:\r\n break\r\n except:\r\n pass\r\n return time", "def parse_time_str(self, time_str):\n try:\n return datetime.strptime(self.force_hour_two_digits(time_str), TIME_FORMAT).time()\n except ValueError:\n return None", "def fromisoformat(cls, time_string):\n if not isinstance(time_string, str):\n raise TypeError(\"fromisoformat: argument must be str\")\n\n try:\n return cls(*_parse_isoformat_time(time_string))\n except Exception:\n raise ValueError(f\"Invalid isoformat string\")", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\r\n return datetime.datetime.strptime(timestr, fmt)", "def parse_time(time_string):\n return calendar.timegm(time.strptime(time_string, \"%Y%m%dT%H%M%SZ\"))", "def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\n return datetime.datetime.strptime(timestr, fmt)", "def _parse_time(time_string: str) -> datetime:\n\n # Strings with timezone (+01:00) in v2 are not 
easily parsed. But time\n # zones are not important here, so we just omit them.\n time_string = time_string.rsplit('+')[0]\n\n time_formats = [\n '%Y-%m-%dT%H:%M:%S.%fZ', # Default\n '%Y-%m-%dT%H:%M:%SZ', # Imported UNCCD data\n '%Y-%m-%dT%H:%M:%S.%f', # Stripped timezone format (v2)\n ]\n for t_format in time_formats:\n try:\n return datetime.strptime(time_string, t_format)\n except ValueError:\n continue", "def time_trans(datetime_str):\n\t\tif re.compile(\"(\\d+)-(\\d+)-(\\d+) (\\d+):(\\d+):(\\d+)\").match(datetime_str):\n\t\t\treturn datetime.strptime(datetime_str, \"%Y-%m-%d %H:%M:%S\")", "def test_parse_iso_8601_time_str(self):\n import datetime\n from route53.util import parse_iso_8601_time_str\n self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01Z'),\n datetime.datetime(2013, 7, 28, 1, 0, 1, 0, \\\n tzinfo=UTC()))\n self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01.001Z'),\n datetime.datetime(2013, 7, 28, 1, 0, 1, 1000, \\\n tzinfo=UTC()))", "def parse(str):\n if len(str) != 16:\n raise ValueError(\"Invalid time length %d\" % len(str))\n if (str[-1]) == 'R':\n return parse_relative_time(str)\n return parse_absolute_time(str)", "def process_time_input(timestamp):\n if type(timestamp) == datetime.datetime:\n output_time = timestamp\n elif type(timestamp) == str:\n try:\n output_time = datetime.datetime.strptime(timestamp,\n \"%Y-%m-%d\")\n except ValueError:\n try:\n output_time = datetime.datetime.strptime(timestamp,\n \"%Y%j\")\n except ValueError:\n raise ValueError(\"The passed timestamp wasn't either \" +\n 'a \"%Y-%m-%d\" string, a \"%Y%j\" string')\n else:\n raise ValueError(\"You can only use a string or a datetime object\")\n return output_time", "def parse_time(text):\n\n # When keyword is 'in' adds values to time\n if text[-3] == 'in':\n remind_time = time.gmtime(int(text[-2]) * int(text[-1]) + time.time())\n # Otherwise try to parse time as written\n else:\n remind_time = text[-1].replace(':', ' ') \\\n + \" \" \\\n + time.strftime(\"%m/%d/%y\", time.gmtime(time.time()))\n remind_time = time.strptime(remind_time, \"%H %M %m/%d/%y\")\n return remind_time", "def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]", "def test_raw_file_name_to_time_ascii(self):\n\n this_time_unix_sec = probsevere_io.raw_file_name_to_time(\n ASCII_FILE_NAME)\n\n self.assertTrue(this_time_unix_sec == VALID_TIME_UNIX_SEC)", "def test_parse_task_time(self):\n ret = parse_task_time(\"asdf:adsf\")\n self.assertEqual(None, ret)\n\n ret = parse_task_time('\\n')\n self.assertEqual(None, ret)", "def _validate_time(mapping: Mapping[str, Any],\n ref: str) -> Optional[SchemaError]:\n if 'format' in mapping:\n token_lines = None # type: Optional[List[List[lexery.Token]]]\n try:\n token_lines = mapry.strftime.tokenize(format=mapping['format'])\n except (lexery.Error, NotImplementedError) as err:\n return SchemaError(str(err), ref='{}/format'.format(ref))\n\n valerr = mapry.strftime.validate_time_tokens(token_lines=token_lines)\n if valerr is not None:\n return SchemaError(str(valerr), ref='{}/format'.format(ref))\n\n return None", "def parse_wcinfotime(timestr):\r\n # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)\r\n m = re.match(r'(\\d+-\\d+-\\d+ \\d+:\\d+:\\d+) ([+-]\\d+) .*', timestr)\r\n if not m:\r\n raise ValueError, \"timestring %r does not match\" % 
timestr\r\n timestr, timezone = m.groups()\r\n # do not handle timezone specially, return value should be UTC\r\n parsedtime = time.strptime(timestr, \"%Y-%m-%d %H:%M:%S\")\r\n return calendar.timegm(parsedtime)", "def time_convert(time):\n try:\n time_data = str(time)\n if time_data:\n try:\n time_data = datetime.strptime(time_data, '%Y%m%d')\n except Exception:\n time_data = datetime.strptime(time_data, '%Y%m%d%H%M%S')\n time_data = time_data.strftime('%Y-%m-%d')\n return time_data\n except Exception:\n return False", "def parse(timestring):\n for parser in _PARSERS:\n match = parser['pattern'].match(timestring)\n if match:\n groups = match.groups()\n ints = tuple(map(int, groups))\n time = parser['factory'](ints)\n return time\n\n raise TimeError('Unsupported time format {}'.format(timestring))", "def parse_time(value: str) -> datetime:\n\n try:\n return datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n return datetime.min", "def get_time(custom_text):\n fmt = '%H:%M:%S'\n while True:\n clear()\n print(\"Time Format: hours:minutes:seconds --:--:--\\n\")\n print(\"{}\\n\".format(custom_text))\n task_date = input(\"Please input a duration of time: \")\n try:\n datetime.datetime.strptime(task_date, fmt)\n except ValueError:\n print(\"'{}' doesn't seem to be a valid time.\".format(task_date))\n input(\"Press Enter\")\n except AttributeError:\n print(\"'{}' doesn't seem to be a valid time.\".format(task_date))\n input(\"Press Enter\")\n else:\n return datetime.datetime.strptime(task_date, fmt).time()\n break", "def parse(s):\n\n rise = False\n set = False\n if s[-1:] == \"R\":\n rise = True\n s = s[:-1]\n elif s[-1:] == \"T\":\n set = True\n s = s[:-1]\n \n x = s.split(\":\")\n if len(x) == 1:\n x.append(\"0\")\n if len(x) == 2:\n x.append(\"0\")\n \n return Time(int(x[0]), int(x[1]), int(x[2]), after_sunrise=rise,\n after_sunset=set)", "def is_valid_time(time):\n try:\n dateutil.parser.parse(time)\n return True\n except dateutil.parser.ParserError:\n return False", "def parse_time(dt: str) -> datetime:\n return datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%SZ\")", "def test_parse_valid_time_of_day(self):\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_time_of_day({\n 'Hour': 23,\n 'Minute': 59\n })\n\n self.assertIsInstance(res, TimeOfDay)\n\n self.assertEqual(res.hour, 23)\n self.assertEqual(res.minute, 59)\n\n res2 = sf_c.parse_time_of_day({\n 'Hour': 0,\n 'Minute': 0\n })\n\n self.assertIsInstance(res2, TimeOfDay)\n\n self.assertEqual(res2.hour, 0)\n self.assertEqual(res2.minute, 0)", "def test_raw_file_name_to_time_ascii_alternative(self):\n\n this_time_unix_sec = probsevere_io.raw_file_name_to_time(\n ALTERNATIVE_ASCII_FILE_NAME)\n\n self.assertTrue(this_time_unix_sec == VALID_TIME_UNIX_SEC)", "def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))", "def time_from_string(time):\n _type = type(time)\n try:\n if _type == datetime.time:\n return time\n elif _type == datetime.datetime:\n return datetime.datetime.time(time)\n else:\n try:\n return datetime.datetime.time(datetime.datetime.strptime(time, '%I:%M %p'))\n except ValueError:\n return datetime.datetime.time(datetime.datetime.strptime(time, '%H:%M:%S'))\n except ValueError:\n return time\n except TypeError:\n return time", "def convert_time(cls, time_str):\n if cls.date_ignore_pattern:\n time_str = re.sub(cls.date_ignore_pattern, '', time_str)\n return datetime.strptime(time_str, cls.date_format)", "async def 
parse_time(\n self, time_str: str, name: str = None, aware: bool = False, today=False, days_offset=0\n ) -> dt.time:\n return await self.AD.sched.parse_time(time_str, name, aware, today=False, days_offset=0)", "def iso_from_string(inp):\n try: r=inp.split(\".\")[0]\n except: r=inp\n fmt=\"%Y-%m-%dT%H:%M:%S\"\n return datetime.strptime(r, fmt)", "def parse_ms_time(self, date, time_, time_shift, with_precision=False):\n # Derived classes might want to use `self`.\n # pylint: disable=no-self-use\n #\n # Derived classes may need access to `time_shift`.\n # pylint: disable=unused-argument\n #\n # For the time being, I don't add a `with_precision` parameter as in\n # the MS parser because the precision for the DOS format is always a\n # minute and can be set in `MSParser.parse_line`. Should you find\n # yourself needing support for `with_precision` for a derived class,\n # please send a mail (see ftputil.txt/html).\n month, day, year = [\n self._as_int(part, \"year/month/day\") for part in date.split(\"-\")\n ]\n if year >= 1000:\n # We have a four-digit year, so no need for heuristics.\n pass\n elif year >= 70:\n year = 1900 + year\n else:\n year = 2000 + year\n try:\n hour, minute, am_pm = time_[0:2], time_[3:5], time_[5]\n except IndexError:\n raise ftputil.error.ParserError(\"invalid time string '{}'\".format(time_))\n hour, minute = (self._as_int(hour, \"hour\"), self._as_int(minute, \"minute\"))\n if hour == 12 and am_pm == \"A\":\n hour = 0\n if hour != 12 and am_pm == \"P\":\n hour += 12\n server_datetime = self._datetime(year, month, day, hour, minute, 0)\n client_datetime = server_datetime - datetime.timedelta(seconds=time_shift)\n st_mtime = client_datetime.timestamp()\n if st_mtime < 0.0:\n st_mtime_precision = UNKNOWN_PRECISION\n st_mtime = 0.0\n else:\n st_mtime_precision = MINUTE_PRECISION\n if with_precision:\n return st_mtime, st_mtime_precision\n else:\n return st_mtime", "def test_parse_time_with_invalid_interval(self):\n now = datetime(2015, 2, 1, 0, 0, 0)\n self.assert_TPVE(parse_time, \"-0\", now)\n self.assert_TPVE(parse_time, \"-12\", now)\n self.assert_TPVE(parse_time, \"-12fortnights\", now)\n self.assert_TPVE(parse_time, \"-20150101\", now)", "async def init_dt(message: discord.Message, time: str, timezone: str):\n timezone = reverse_gmt(timezone)\n\n try:\n dt = pendulum.parse(time, tz=timezone)\n except ValueError:\n await client.say(message, \"Time format not recognized.\")\n return None, None\n\n return dt, timezone", "def datetime_from_iso(iso_time):\n # \"start_time\": \"2015-05-23T10:00:00-0700\",\n # \"end_time\": \"2015-05-23T20:00:00-0700\",\n if iso_time is None:\n return iso_time\n\n try:\n return datetime.strptime(iso_time.rsplit(\"-\", 1)[0], \"%Y-%m-%dT%H:%M:%S\")\n except ValueError:\n try:\n return datetime.strptime(iso_time, \"%Y-%m-%dT%H:%M:%S\") # no UTC offset\n except ValueError:\n try:\n return datetime.strptime(iso_time, \"%Y-%m-%d\") # assume no UTC offset\n except ValueError:\n try:\n return datetime.strptime(iso_time, \"%Y-%m\") # assume no UTC offset\n except ValueError:\n try:\n return datetime.strptime(iso_time, \"%Y\") # assume no UTC offset\n except:\n logging.warning(\"what! iso_time: %s\", iso_time)", "def extract_timestamp(input_time: str) -> int:\n parsers = (parse_sfx_now, parse_sfx_relative_time, parse_timestamp, parse_date)\n for parser in parsers:\n try:\n return parser(input_time)\n except ValueError:\n pass\n print(\n f'ERROR: unrecognized time format {input_time}. 
Please use either SignalFx relative '\n 'time format, a date or a UNIX epoch timestamp in seconds or milliseconds. ABORTING'\n )\n exit(1)", "def parse_time(time_string):\n times = time_string.split(\"\\n\")\n\n user_time_str = times[-2].split(\"\\t\")[-1]\n sys_time_str = times[-1].split(\"\\t\")[-1]\n\n #print user_time_str, sys_time_str\n\n user_time = parse_m_s(user_time_str)\n sys_time = parse_m_s(sys_time_str)\n\n return user_time + sys_time", "def test_format_optional_time_field(self):\n formatted_time = jiratimereport.format_optional_time_field(99960, \"\")\n expected_result = \"27:46:00\"\n self.assertEqual(expected_result, formatted_time)", "def _strptime(cls, raw: typing.Optional[str]) -> typing.Optional[datetime.datetime]:\n if not raw:\n return None\n return datetime.datetime.strptime(raw, cls._TSFMT)", "def clean_date(raw_time):\n time_stamp = raw_time.split(\" \")\n time_stamp = str(time_stamp[1]+' '+time_stamp[2]+' '+time_stamp[3]+' '+time_stamp[5])\n clean_date_time = parser.parse(time_stamp)\n return clean_date_time", "def test_time_requirement(self):\n test_string = \"AlanTimeZT\"\n test_passes = False\n try:\n self.parser.extract_zt(test_string)\n test_passes = False\n except:\n test_passes = True\n self.assertTrue(test_passes)", "def from_iso(iso, fmt=\"%Y-%m-%dT%H:%M:%S.%f\"):\n # change datetime.datetime to time, return time.struct_time type\n return datetime.strptime(iso, fmt)", "def datetime_from_string(time):\n try:\n if type(time) == datetime.datetime:\n return time\n else:\n try:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S.%f')\n except ValueError:\n return time\n except TypeError:\n return time", "def _parse_name_time(self, name):\n time_match = re.search(r'\\d{1,2}:\\d{2}([ apm.]{3,5})?', name)\n if not time_match:\n return name, None\n time_str = time_match.group()\n name = name.replace(time_str, '').strip()\n time_str = time_str.strip().replace('.', '')\n # Default to PM if not AM/PM not provided\n if 'm' not in time_str:\n time_str = '{} pm'.format(time_str)\n return name, datetime.strptime(time_str, '%I:%M %p').time()", "def test_parse_time_exceptions(\n test_input: typing.Any,\n expected: Exception,\n):\n with pytest.raises(expected):\n tvmaze.parsers.parse_time(test_input)", "def _parse_iso_datetime(string):\n try:\n string = string.split('.', 1)[0] # strip out microseconds\n return calendar.timegm(time.strptime(string, '%Y-%m-%dT%H:%M:%S'))\n except ValueError, e:\n raise ValueError('Invalid ISO date/time %r' % string)", "def parse_valid(self):\n # Now lets look for a local timestamp in the product MND or elsewhere\n tokens = TIME_RE.findall(self.unixtext)\n # If we don't find anything, lets default to now, its the best\n if tokens:\n # [('1249', 'AM', 'EDT', 'JUL', '1', '2005')]\n self.z = tokens[0][2].upper()\n self.tz = pytz.timezone(reference.name2pytz.get(self.z, 'UTC'))\n hhmi = tokens[0][0]\n # False positive from regex\n if hhmi[0] == ':':\n hhmi = hhmi.replace(u\":\", \"\")\n if hhmi.find(\":\") > -1:\n (hh, mi) = hhmi.split(\":\")\n elif len(hhmi) < 3:\n hh = hhmi\n mi = 0\n else:\n hh = hhmi[:-2]\n mi = hhmi[-2:]\n dstr = \"%s:%s %s %s %s %s\" % (hh, mi, tokens[0][1], tokens[0][4],\n tokens[0][5], tokens[0][6])\n # Careful here, need to go to UTC time first then come back!\n try:\n now = datetime.datetime.strptime(dstr, \"%I:%M %p %b %d %Y\")\n except ValueError:\n msg = (\"Invalid timestamp [%s] found in product \"\n \"[%s %s %s] header\") % 
(\" \".join(tokens[0]), self.wmo,\n self.source, self.afos)\n raise TextProductException(self.source[1:], msg)\n now += datetime.timedelta(hours=reference.offsets[self.z])\n self.valid = now.replace(tzinfo=pytz.timezone('UTC'))\n return\n # Search out the WMO header, this had better always be there\n # We only care about the first hit in the file, searching from top\n\n # Take the first hit, ignore others\n wmo_day = int(self.ddhhmm[:2])\n wmo_hour = int(self.ddhhmm[2:4])\n wmo_minute = int(self.ddhhmm[4:])\n\n self.valid = self.utcnow.replace(hour=wmo_hour, minute=wmo_minute,\n second=0, microsecond=0)\n if wmo_day == self.utcnow.day:\n return\n elif wmo_day - self.utcnow.day == 1: # Tomorrow\n self.valid = self.valid.replace(day=wmo_day)\n elif wmo_day > 25 and self.utcnow.day < 15: # Previous month!\n self.valid = self.valid + datetime.timedelta(days=-10)\n self.valid = self.valid.replace(day=wmo_day)\n elif wmo_day < 5 and self.utcnow.day >= 15: # next month\n self.valid = self.valid + datetime.timedelta(days=10)\n self.valid = self.valid.replace(day=wmo_day)\n else:\n self.valid = self.valid.replace(day=wmo_day)", "def parse_influxdb_time(t_str):\n try:\n return datetime.datetime.strptime(t_str[:26].rstrip('Z'), '%Y-%m-%dT%H:%M:%S.%f')\n except ValueError:\n return datetime.datetime.strptime(t_str[:19], '%Y-%m-%dT%H:%M:%S')", "def parse_datetime(date_time, exception_class=Exception):\n try:\n return datetime.datetime.strptime(\n date_time, '%Y-%m-%d %H:%M:%S'\n )\n except Exception as error:\n logging.exception(error)\n raise exception_class(\n 'date time %s format is invalid' % date_time\n )", "def _check_and_convert_time(time_input, assign_default_time=False):\n\n try:\n if isinstance(time_input, str): # input time_input as string\n if time_input.replace(\n \".\", \"\", 1\n ).isdigit(): # input time_input as numeric string\n time_input = (\n float(time_input)\n if \".\" in time_input\n else int(time_input) / 1000.0\n )\n else: # input time_input as datetime string\n time_input = dateutil.parser.parse(time_input).timestamp()\n elif isinstance(\n time_input, int\n ): # input time_input as epoch timestamps in milliseconds\n time_input = time_input / 1000.0\n elif isinstance(time_input, datetime):\n time_input = time_input.timestamp()\n\n datetime.fromtimestamp(time_input) # check current time_input is valid\n except Exception:\n if assign_default_time:\n logging.info(\n \"Cannot convert time_input into timestamps: {}\".format(time_input)\n )\n time_input = time.time()\n else:\n raise ValueError(\n \"Cannot convert time_input into timestamps: {}\".format(time_input)\n )\n\n return time_input", "def test_parseTimeInvalidValues(self):\n invalidStrings = [\n \"invalid-July-2017\",\n \"2-invalid-2017\",\n \"2-July-invalid\",\n ]\n for invalid in invalidStrings:\n self.assertRaises(ValueError, imap4.parseTime, invalid)", "def is_time_in_given_format(time_string, time_format):\n try:\n datetime.strptime(time_string, time_format)\n return True\n except ValueError:\n return False", "def __parse_iso8601(self, s):\n month = day = week_day = 1\n year = hour = minute = seconds = hour_off = min_off = 0\n tznaive = True\n\n iso8601 = iso8601Match(s.strip())\n fields = iso8601 and iso8601.groupdict() or {}\n if not iso8601 or fields.get('garbage'):\n raise IndexError\n\n if fields['year']:\n year = int(fields['year'])\n if fields['month']:\n month = int(fields['month'])\n if fields['day']:\n day = int(fields['day'])\n\n if fields['year_day']:\n d = DateTime('%s-01-01' % year) + int(fields['year_day']) 
- 1\n month = d.month()\n day = d.day()\n\n if fields['week']:\n week = int(fields['week'])\n if fields['week_day']:\n week_day = int(fields['week_day'])\n d = DateTime('%s-01-04' % year)\n d = d - (d.dow() + 6) % 7 + week * 7 + week_day - 8\n month = d.month()\n day = d.day()\n\n if fields['hour']:\n hour = int(fields['hour'])\n\n if fields['minute']:\n minute = int(fields['minute'])\n elif fields['fraction']:\n minute = 60.0 * float('0.%s' % fields['fraction'])\n seconds, minute = math.modf(minute)\n minute = int(minute)\n seconds = 60.0 * seconds\n # Avoid reprocess when handling seconds, bellow\n fields['fraction'] = None\n\n if fields['second']:\n seconds = int(fields['second'])\n if fields['fraction']:\n seconds = seconds + float('0.%s' % fields['fraction'])\n elif fields['fraction']:\n seconds = 60.0 * float('0.%s' % fields['fraction'])\n\n if fields['hour_off']:\n hour_off = int(fields['hour_off'])\n if fields['signal'] == '-':\n hour_off *= -1\n\n if fields['min_off']:\n min_off = int(fields['min_off'])\n\n if fields['signal'] or fields['Z']:\n tznaive = False\n else:\n tznaive = True\n\n # Differ from the specification here. To preserve backwards\n # compatibility assume a default timezone == UTC.\n tz = 'GMT%+03d%02d' % (hour_off, min_off)\n\n return year, month, day, hour, minute, seconds, tz, tznaive", "def convert_from_iso(s):\n # TODO: Allow for more timezones than just -6 GMT\n return datetime.datetime.strptime(s, \"%Y-%m-%dT%H:%M:%S-06:00\")", "def sanitize(time_string): # Fix non-uniformity in the athletes data to enable sorting\n if '-' in time_string:\n splitter = '-'\n (mins, secs) = time_string.split(splitter)\n elif ':' in time_string:\n splitter = ':'\n (mins, secs) = time_string.split(splitter)\n else:\n return time_string\n return '{0}.{1}'.format(mins, secs)", "def parse_time(self):\n\n # parse time\n year = int(self.start[:4])\n month = int(self.start[5:7])\n day = int(self.start[8:10])\n hours = int(self.start[11:13])\n minutes = int(self.start[14:16])\n seconds = int(self.start[17:19])\n time = datetime.datetime(year, month, day, hours, minutes, seconds)\n\n # advance time\n time = time + datetime.timedelta(minutes=self.rain_interval)\n time = time.isoformat(\" \")\n\n # timestamp\n # elevation (m)\n evolved_elevation = (\n 'elevation_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # water depth (m)\n depth = (\n 'depth_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # sediment flux (kg/ms)\n sediment_flux = (\n 'flux_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # erosion-deposition (kg/m2s)\n erosion_deposition = (\n 'erosion_deposition_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # elevation difference (m)\n difference = (\n 'difference_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n\n return (evolved_elevation, time, depth, sediment_flux,\n erosion_deposition, difference)", "def is_time(time_string, time_format=''):\n if time_string is None:\n return False\n elif isinstance(time_string, datetime):\n return True\n\n try:\n parse_time(time_string, time_format)\n except ValueError:\n return False\n else:\n return True", "def parse_datetime(timestamp_txt):\n # The datestring is uppercased because the parser cannot handle\n # lowercase timezones.\n timestamp_txt = timestamp_txt.upper().strip()\n # Replace date ranges of the form 2-8 Jun 2014 with the last date in the range.\n # Example article: 
http://www.promedmail.org/direct.php?id=2539532\n # http://daterangeparser.readthedocs.org/ is not used because it can cause\n # problems when other formats are used. For example, if the year is not\n # included in the range the current year will be used.\n timestamp_txt = re.sub(r\"\\b\\d{1,2}\\-(\\d{1,2}\\s\\w{3,10}\\s\\d{4})\\b\", r\"\\1\", timestamp_txt)\n # A few timestamps have this format. The timezones are removed\n # and the offset is used instead.\n timestamp_txt = re.sub(r\"CST(\\-?\\d)CDT$\", r\"\\1\", timestamp_txt)\n # If an offset is specified the timezone is removed because having\n # both can cause the parser to fail.\n timestamp_txt = re.sub(r\"(\\-\\d{1,4})\\s?[A-Z]{1,5}$\", r\"\\1\", timestamp_txt)\n # The parser fails on some date abbreviations.\n timestamp_txt = re.sub(r\"\\bthurs?\\b\", \"Thursday\", timestamp_txt, flags=re.I)\n timestamp_txt = re.sub(r\"\\btues\\b\", \"Tuesday\", timestamp_txt, flags=re.I)\n timestamp_txt = re.sub(r\"\\bweds\\b\", \"Wednesday\", timestamp_txt, flags=re.I)\n timestamp_txt = re.sub(r\"\\bsept\\b\", \"September\", timestamp_txt, flags=re.I)\n \n # Check for malformed date formats that we know about.\n if re.search(r\"\\-\\d{3}(\\d{2,})?$\", timestamp_txt):\n # timestamps with timezone offsets of a certain number of digits \n # cannot be parsed.\n return None\n if re.search(r\"\\s:\", timestamp_txt):\n # timestamps with spaces before colons are not wellformed.\n return None\n \n try:\n date = dateutil.parser.parse(timestamp_txt,\n tzinfos=getTimeZoneDict(),\n default=datetime.datetime(9999,1,1))\n if date.year >= 9999:\n # The default year was used so the year was missing from the string.\n print \"Missing year in date:\", timestamp_txt\n return None\n else:\n return date\n except ValueError as e:\n print \"Unexpected malformed date:\", timestamp_txt\n return None", "def parse_time_string(time_str, tz=\"US/Pacific\"):\n\n # parsedatetime doesn't handle ISO-8601 time strings (YYYY-MM-DDThh:mm:ss+zz) so\n # try to parse it with arrow first and then use parsedatetime as a fallback (grumble)\n t = None\n try:\n t = arrow.get(time_str)\n # If the input string didn't specify a timezone, fill in the default\n if len(time_str.split(\"+\")) == 1:\n t = t.replace(tzinfo=tz)\n except arrow.parser.ParserError:\n cal = parsedatetime.Calendar()\n parse_result = cal.parse(time_str)\n if parse_result[1] == 0:\n raise ValueError(\"Could not understand time {time}\".format(time=time_str))\n t = arrow.get(parse_result[0]).replace(tzinfo=tz)\n return t.to(\"utc\")", "def test_parse_time_absolute_date(self):\n self.assertEqual(\n parse_time(\"20150201\", None), datetime(2015, 2, 1, 0, 0, 0))\n self.assertEqual(\n parse_time(\"19700101\", None), datetime(1970, 1, 1, 0, 0, 0))\n self.assertEqual(\n parse_time(\"19010101\", None), datetime(1901, 1, 1, 0, 0, 0))\n self.assertEqual(\n parse_time(\"99991231\", None), datetime(9999, 12, 31, 0, 0, 0))", "def __wrapper__(datetime_input,output_format='%m/%d/%Y %H:%M:%S'):\n result = parse_time(time_input,output_format)\n print(\"Result: {}\".format(result))\n return 0", "def str_to_time(my_time):\n time_format = \"%H:%M\"\n try:\n my_time = datetime.strptime(my_time, time_format)\n except:\n my_time = datetime.now()\n\n return my_time", "def parse_iso8601(timestamp: str) -> datetime.datetime:\n regexp = re.compile(r\"(?P<year>\\d{4})\"\n r\"-?\"\n r\"(?P<month>\\d{2})\"\n r\"-?\"\n r\"(?P<day>\\d{2})\"\n r\"T?\"\n r\"(?P<hour>\\d{2})?\"\n r\":?\"\n r\"(?P<minute>\\d{2})?\"\n r\":?\"\n r\"(?P<second>\\d{2})?\"\n 
r\"(?P<microsec>.\\d+)?\"\n r\"(?P<tz_hour>(Z|(\\+|-)\\d{2}))?\"\n r\":?\"\n r\"(?P<tz_minute>\\d{2})?\"\n )\n\n result = regexp.search(timestamp)\n if result == None:\n raise ValueError(\"Input value is not a timestamp\")\n else:\n year = result.group('year') #returns string\n month = result.group('month')\n day = result.group('day')\n hour = result.group('hour')\n minute = result.group('minute')\n second = result.group('second')\n microsec = result.group('microsec')\n if microsec != None:\n if len(microsec) < 7:\n microsec += (\"0\" * (6 - len(microsec)))\n else:\n microsec_list = list(microsec)\n microsec_list[6:] = []\n microsec = \"\".join(microsec_list)\n\n tz_hour = result.group('tz_hour')\n if tz_hour != None:\n if tz_hour == 'Z':\n tz_hour = 0\n else:\n tz_hour = int(tz_hour)\n\n tz_minute = result.group('tz_minute')\n if tz_minute != None:\n if tz_hour < 0:\n tz_minute = int(tz_minute) * -1\n else:\n tz_minute = int(tz_minute)\n elif tz_hour != None and tz_minute == None:\n tz_minute = 0\n\n if tz_hour != None:\n tz_delta = datetime.timezone(timedelta(hours=tz_hour, minutes=tz_minute))\n else:\n tz_delta = None\n\n my_time = []\n #print(my_time)\n for result in year, month, day, hour, minute, second, microsec:\n if result != None:\n my_time.append(int(result))\n else:\n my_time.append(0)\n #print(my_time)\n max_time = [9999, 12, 31, 23, 59, 59, 999999]\n for time in range(len(my_time)):\n if my_time[time] > max_time[time]:\n raise ValueError(\"Input timestamp is not valid. \" + str(my_time[time])\n + \" > \" + str(max_time[time]))\n\n my_dt = datetime.datetime(my_time[0], my_time[1], my_time[2], my_time[3],\n my_time[4], my_time[5], my_time[6], tzinfo=tz_delta)\n print(my_dt)\n return my_dt", "def date_to_iso(string):\r\n\r\n # disregard tokenisation, if it's there, to make this an easier conversion for GUTime\r\n string = re.sub(r'<([^~]*)~.+?>', r'\\1 ', string)\r\n\r\n # Defaults\r\n d = None\r\n m = None\r\n y = None\r\n h = None\r\n min = None\r\n s = None\r\n fs = None\r\n zone = None\r\n\r\n # ACE format\r\n match = re.search(r'(\\d\\d\\d\\d\\d\\d\\d\\d:\\d\\d\\d\\d)', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r':', r'T', d)\r\n return d\r\n\r\n # Already in ISO format\r\n match = re.search(r'(\\d\\d\\d\\d-?\\d\\d-?\\d\\d)(-?(T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?))?', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r'-', r'', d)\r\n h = match.group(3)\r\n if h is not None:\r\n h = re.sub(r':', r'', h)\r\n return d + h\r\n else:\r\n return d\r\n\r\n # some pre-processing\r\n match = re.search('T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?', re.sub('\\s', '', string))\r\n if match is not None:\r\n return re.sub(r':', r'', re.sub('\\s', '', string))\r\n\r\n # extract date\r\n if re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(1))\r\n m = month_to_num(match.group(5))\r\n y = match.group(7)\r\n\r\n elif re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n 
r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(4))\r\n m = month_to_num(match.group(1))\r\n y = match.group(7)\r\n\r\n elif re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string))\r\n m = match.group(3)\r\n d = match.group(4)\r\n y = match.group(1)\r\n\r\n elif re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string))\r\n m = match.group(1)\r\n d = match.group(3)\r\n y = match.group(4)\r\n\r\n if y is not None:\r\n # check for European style date\r\n if 12 < int(m) <= 31 and int(d) <= 12:\r\n new_d = m\r\n m = d\r\n d = new_d\r\n\r\n # check for 2 digit year\r\n y = normalise_two_digit_year(str(y))\r\n\r\n iso = \"%4d%02d%02d\" % (int(y), int(m), int(d))\r\n\r\n else:\r\n iso = \"XXXXXXXX\"\r\n\r\n # Extract time\r\n match = re.search(r'(\\d?\\d):(\\d\\d)(:(\\d\\d)(\\.\\d+)?)?(([AP])\\.?M\\.?)?(([+\\-]\\d+|[A-Z][SD]T|GMT([+\\-]\\d+)?))?',\r\n re.sub('\\s', '', string), re.I)\r\n if match is not None:\r\n h = match.group(1)\r\n min = match.group(2)\r\n s = match.group(4)\r\n fs = match.group(5)\r\n ampm = match.group(7)\r\n zone = match.group(9)\r\n\r\n if ampm is not None and ampm[0].lower() == 'p':\r\n h = str(int(h) + 12)\r\n\r\n if zone is not None:\r\n zm = re.search(r'(GMT)([+\\-]\\d+)', zone)\r\n if zm is not None:\r\n zone = zm.group(2)\r\n elif zone.lower().find('gmt') > -1:\r\n zone = 'Z'\r\n elif re.search(r'([A-Z])([SD])T', zone) is not None:\r\n zm = re.search(r'([A-Z])([SD])T', zone)\r\n # Timezone offsets from GMT\r\n timezones = {\r\n \"R\": 1,\r\n \"E\": -5,\r\n \"C\": -6,\r\n \"M\": -7,\r\n \"P\": -8\r\n }\r\n if zm.group(1).upper() in timezones:\r\n zone = timezones[zm.group(1).upper()]\r\n if zm.group(2).lower() == 'd':\r\n zone += 1\r\n if zone < 0:\r\n zone = '-%02d00' % (-1 * zone)\r\n else:\r\n zone = '+%02d00' % zone\r\n elif re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I) is not None:\r\n match = re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I)\r\n h = match.group(1)\r\n min = match.group(2)\r\n\r\n if h is not None:\r\n if fs is not None:\r\n fs = re.sub(r'\\.', r'', fs)\r\n iso += 'T%02d%02d%02d.%02d' % (int(h), int(min), int(s), int(fs))\r\n elif s is not None:\r\n iso += 'T%02d%02d%02d' % (int(h), int(min), int(s))\r\n elif min is not None:\r\n iso += 'T%02d%02d' % (int(h), int(min))\r\n\r\n if zone is not None:\r\n iso += zone.lstrip()\r\n\r\n return iso", "def test_space_separator():\n d = iso8601.parse_datetime(\"2007-06-23 06:40:34.00Z\")\n assert d.year == 2007\n assert d.month == 6\n assert d.day == 23\n assert d.hour == 6\n assert d.minute == 40\n assert d.second == 34\n assert d.microsecond == 0\n assert d.tzinfo == iso8601.UTC", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return 
(storedText, not storedText)", "def tedoius_time(time_string):\n start = ['start', 'begin', 'beginning', 'head', 'first']\n end = ['slut', 'end', 'tail', 'finish',\n 'finito', 'fin', 'done', 'finished']\n\n if time_string.lower() in start:\n time_string = \"00:00:00\"\n # We need this exact string for later\n elif time_string.lower() in end:\n return time_string\n elif len(time_string) == 1:\n time_string = f\"00:00:0{time_string}\"\n elif len(time_string) == 2:\n time_string = f\"00:00:{time_string}\"\n elif len(time_string) == 3:\n time_string = f\"00:00{time_string}\"\n elif len(time_string) == 4:\n time_string = f\"00:0{time_string}\"\n elif len(time_string) == 5:\n time_string = f\"00:{time_string}\"\n elif len(time_string) == 6:\n time_string = f\"00{time_string}\"\n elif len(time_string) == 7:\n time_string = f\"0{time_string}\"\n elif len(time_string) > 8:\n raise('Time string too long!')\n return time_string", "def __parse_init_time(args):\n if args.init_time is None:\n return\n try:\n if args.init_time.isdigit():\n args.init_time=int(args.init_time)\n else:\n args.init_time=datetime.strptime(args.init_time, __DATE_FORMAT)\n except Exception as ex:\n error_exit(str(ex))", "def valid_format(self):\n\n # If candidate is None, return true\n if not self.dt:\n print \"dt empty\"\n return True\n\n # Verify if time format is ok and stores in into a time-tuple format\n try:\n stime = datetime.strptime(self.dt, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n return False\n else:\n return True", "def extract_time(time):\n\n # Ensure time string is in correct format\n time_regex = re.compile(\"^\\d{1,2}:\\d{2}(am|AM|pm|PM)$\")\n if not time_regex.match(time):\n raise ValueError(\"Time (%s) is not in format HH:MM(am|AM|pm|PM)\" % time)\n\n am_pm = time[AM_PM_POS:]\n hours, minutes = map(int, time[:AM_PM_POS].split(':'))\n\n # Error checking\n if hours not in range(MIN_HR, MAX_HR + 1):\n raise ValueError(\"Hours (%d) is not between [%d, %d]\" % (hours, MIN_HR, MAX_HR))\n if minutes not in range(MIN_MIN, MAX_MIN + 1):\n raise ValueError(\"Minutes (%d) is not between [%d, %d]\" % (minutes, MIN_MIN, MAX_MIN))\n\n # Apply 12-hour to 24-hour time corrections\n hours %= HRS_12\n if am_pm == 'pm' or am_pm == 'PM':\n hours = (hours + HRS_12) % HRS_24\n\n return hours, minutes", "def test_parse_none_time_of_day(self):\n\n res = sf_c.parse_time_of_day(None)\n self.assertIs(res, None)", "def parse_time(time_string, time_format='', **kwargs):\n if isinstance(time_string, pandas.Timestamp):\n return time_string.to_pydatetime()\n elif isinstance(time_string, datetime) or time_format == 'datetime':\n return time_string\n elif isinstance(time_string, tuple):\n return datetime(*time_string)\n elif time_format == 'utime' or isinstance(time_string, (int, float)):\n return datetime(1979, 1, 1) + timedelta(0, time_string)\n elif isinstance(time_string, pandas.DatetimeIndex):\n return time_string._mpl_repr()\n elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):\n ii = [ss.astype(datetime) for ss in time_string]\n # Validate (in an agnostic way) that we are getting a datetime rather than a date\n return np.array([datetime(*(dt.timetuple()[:6])) for dt in ii])\n elif time_string is 'now':\n return datetime.utcnow()\n elif isinstance(time_string, astropy.time.Time):\n return time_string.datetime\n else:\n # remove trailing zeros and the final dot to allow any\n # number of zeros. This solves issue #289\n if '.' 
in time_string:\n time_string = time_string.rstrip(\"0\").rstrip(\".\")\n for time_format in TIME_FORMAT_LIST:\n try:\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_format)\n except TypeError:\n break\n if ts is None:\n continue\n return datetime.strptime(ts, time_format) + time_delta\n except ValueError:\n pass\n\n time_string_parse_format = kwargs.pop('_time_string_parse_format', None)\n if time_string_parse_format is not None:\n # Following a comment by the Lead Developer, the Try / except clause\n # is replaced. The Lead Developer thinks that this the try/except\n # clause is related to SunPy's database module.\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_string_parse_format)\n if ts and time_delta:\n return datetime.strptime(ts, time_string_parse_format) + time_delta\n else:\n return datetime.strptime(time_string, time_string_parse_format)\n except Exception:\n pass\n raise ValueError(\"'{tstr!s}' is not a valid time string!\".format(tstr=time_string))", "def test_raw_file_name_to_time_json(self):\n\n this_time_unix_sec = probsevere_io.raw_file_name_to_time(JSON_FILE_NAME)\n self.assertTrue(this_time_unix_sec == VALID_TIME_UNIX_SEC)", "def test_format_date_time(self):\r\n formatted_date = date_formatter.format_date_time(\"190501:0902\")\r\n self.assertEqual(formatted_date, \"2019-05-01 09:02\")", "def _get_time(string):\n string = string[0:7] # Drop day\n return string.replace(\"-\", \"\")", "def test_parse_no_timezone_no_strict():\n d = iso8601.parse_datetime(\"2007-01-01T08:00:00\", strict=False)\n assert d.year == 2007\n assert d.month == 1\n assert d.day == 1\n assert d.hour == 8\n assert d.minute == 0\n assert d.second == 0\n assert d.microsecond == 0\n assert d.tzinfo == iso8601.UTC", "def extract_time(maybe_time_str: str) -> Optional[str]:\n match = TIMESTAMP_RE.search(maybe_time_str)\n if match is not None:\n return match.group()\n return None", "def _get_datetime(s):\n\n # It would be nice to be able to define a single format string \n # for use with datetime.strptime, but as seen in the examples, \n # Kindle clipping files can express datetimes in slightly \n # different ways. Therefore, we first have to normalize the components \n # of the Kindle datetime string into a consistent form. \n # The normalized form has no commas and always specifies seconds:\n # \"April 22 2018 12:33:10 PM\"\n \n # Use the DATETIME_REGEX regex which contains these groups:\n # 1: Month (en_US spelling of months)\n # 2: Day\n # 3: Year\n # 4: Hour\n # 5: Minute\n # 6: Optional seconds\n # 7: AM/PM (en_US spelling)\n \n month = None\n day = None\n year = None\n hour = None\n minute = None\n seconds = '00' # the Kindle datetime may not specify any seconds\n period = 'AM'\n \n for match in datetime_regex.finditer(s):\n month = match.group(1)\n day = match.group(2).zfill(2) #zero padded, two digits\n year = match.group(3)\n hour = match.group(4).zfill(2) #zero padded, two digits\n minute = match.group(5)\n if match.group(6) is not None:\n seconds = match.group(6)\n period = match.group(7)\n break \n\n\n normalized_string = \"%s %s %s %s:%s:%s %s\" % (month, day, year, hour, minute, seconds, period) \n dt = datetime.datetime.strptime(normalized_string, \"%B %d %Y %I:%M:%S %p\")\n return dt" ]
[ "0.67903686", "0.66916007", "0.6613034", "0.6586266", "0.6439515", "0.64221704", "0.6381868", "0.6360631", "0.63137174", "0.6284302", "0.62411284", "0.62141776", "0.61804426", "0.61793673", "0.6178851", "0.6156706", "0.61117786", "0.6111385", "0.6104648", "0.6101974", "0.6100496", "0.6089722", "0.6082327", "0.607483", "0.6044045", "0.60433596", "0.6010412", "0.6008228", "0.6007424", "0.6006593", "0.59994614", "0.5988094", "0.59765255", "0.5967084", "0.5956675", "0.5928562", "0.59096766", "0.5889892", "0.5889811", "0.58885944", "0.5887636", "0.58645076", "0.585946", "0.5843755", "0.58413774", "0.5837153", "0.5831691", "0.58034426", "0.57952994", "0.5766873", "0.5762156", "0.5745253", "0.5737616", "0.57309145", "0.5730308", "0.57226825", "0.5719443", "0.5710966", "0.57040536", "0.5686548", "0.5684028", "0.567686", "0.5670241", "0.566942", "0.5662044", "0.56590945", "0.5648783", "0.5638189", "0.5607994", "0.5606814", "0.5606464", "0.56039864", "0.55920386", "0.5590765", "0.55879545", "0.5579227", "0.55743796", "0.55686104", "0.5557122", "0.55513406", "0.5546375", "0.5546304", "0.55303395", "0.5524089", "0.55136466", "0.55098206", "0.5505532", "0.5503825", "0.5482901", "0.54795545", "0.546652", "0.54520327", "0.5451317", "0.544671", "0.54371893", "0.5428982", "0.54247797", "0.54139787", "0.5409773", "0.54093456" ]
0.6865028
0
Convert text of date to ISO format used internally, with the local time zone.
def interpret_date( text ): try: as_arrow = arrow.get(text, "MM/DD/YYYY").replace( tzinfo=tz.tzlocal()) except: flask.flash("Date '{}' didn't fit expected format 12/31/2001") raise return as_arrow.isoformat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_to_iso(string):\r\n\r\n # disregard tokenisation, if it's there, to make this an easier conversion for GUTime\r\n string = re.sub(r'<([^~]*)~.+?>', r'\\1 ', string)\r\n\r\n # Defaults\r\n d = None\r\n m = None\r\n y = None\r\n h = None\r\n min = None\r\n s = None\r\n fs = None\r\n zone = None\r\n\r\n # ACE format\r\n match = re.search(r'(\\d\\d\\d\\d\\d\\d\\d\\d:\\d\\d\\d\\d)', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r':', r'T', d)\r\n return d\r\n\r\n # Already in ISO format\r\n match = re.search(r'(\\d\\d\\d\\d-?\\d\\d-?\\d\\d)(-?(T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?))?', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r'-', r'', d)\r\n h = match.group(3)\r\n if h is not None:\r\n h = re.sub(r':', r'', h)\r\n return d + h\r\n else:\r\n return d\r\n\r\n # some pre-processing\r\n match = re.search('T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?', re.sub('\\s', '', string))\r\n if match is not None:\r\n return re.sub(r':', r'', re.sub('\\s', '', string))\r\n\r\n # extract date\r\n if re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(1))\r\n m = month_to_num(match.group(5))\r\n y = match.group(7)\r\n\r\n elif re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(4))\r\n m = month_to_num(match.group(1))\r\n y = match.group(7)\r\n\r\n elif re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string))\r\n m = match.group(3)\r\n d = match.group(4)\r\n y = match.group(1)\r\n\r\n elif re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string))\r\n m = match.group(1)\r\n d = match.group(3)\r\n y = match.group(4)\r\n\r\n if y is not None:\r\n # check for European style date\r\n if 12 < int(m) <= 31 and int(d) <= 12:\r\n new_d = m\r\n m = d\r\n d = new_d\r\n\r\n # check for 2 digit year\r\n y = normalise_two_digit_year(str(y))\r\n\r\n iso = \"%4d%02d%02d\" % (int(y), int(m), int(d))\r\n\r\n else:\r\n iso = \"XXXXXXXX\"\r\n\r\n # Extract time\r\n match = re.search(r'(\\d?\\d):(\\d\\d)(:(\\d\\d)(\\.\\d+)?)?(([AP])\\.?M\\.?)?(([+\\-]\\d+|[A-Z][SD]T|GMT([+\\-]\\d+)?))?',\r\n re.sub('\\s', '', string), re.I)\r\n if match is not None:\r\n h = match.group(1)\r\n min = match.group(2)\r\n s = match.group(4)\r\n fs = match.group(5)\r\n ampm = match.group(7)\r\n zone = 
match.group(9)\r\n\r\n if ampm is not None and ampm[0].lower() == 'p':\r\n h = str(int(h) + 12)\r\n\r\n if zone is not None:\r\n zm = re.search(r'(GMT)([+\\-]\\d+)', zone)\r\n if zm is not None:\r\n zone = zm.group(2)\r\n elif zone.lower().find('gmt') > -1:\r\n zone = 'Z'\r\n elif re.search(r'([A-Z])([SD])T', zone) is not None:\r\n zm = re.search(r'([A-Z])([SD])T', zone)\r\n # Timezone offsets from GMT\r\n timezones = {\r\n \"R\": 1,\r\n \"E\": -5,\r\n \"C\": -6,\r\n \"M\": -7,\r\n \"P\": -8\r\n }\r\n if zm.group(1).upper() in timezones:\r\n zone = timezones[zm.group(1).upper()]\r\n if zm.group(2).lower() == 'd':\r\n zone += 1\r\n if zone < 0:\r\n zone = '-%02d00' % (-1 * zone)\r\n else:\r\n zone = '+%02d00' % zone\r\n elif re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I) is not None:\r\n match = re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I)\r\n h = match.group(1)\r\n min = match.group(2)\r\n\r\n if h is not None:\r\n if fs is not None:\r\n fs = re.sub(r'\\.', r'', fs)\r\n iso += 'T%02d%02d%02d.%02d' % (int(h), int(min), int(s), int(fs))\r\n elif s is not None:\r\n iso += 'T%02d%02d%02d' % (int(h), int(min), int(s))\r\n elif min is not None:\r\n iso += 'T%02d%02d' % (int(h), int(min))\r\n\r\n if zone is not None:\r\n iso += zone.lstrip()\r\n\r\n return iso", "def _make_iso_time(time: datetime,\n date: datetime,\n time_zone: pytz.timezone) -> str:\n time_combined = time.replace(year=date.year,\n month=date.month,\n day=date.day)\n return time_zone.localize(time_combined).isoformat()", "def convertFromISODate(date):\n if date:\n try:\n datetime_object = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n return date\n else:\n return datetime_object.strftime('%Y-%m-%d')\n else:\n return None", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def convert_date(iso_string): ## ##\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\") ##\n return d.strftime(\"%A %d %B %Y\") ##", "def interpret_date(text):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def datetime_to_isoformat(obj: datetime.datetime) -> str:\n return obj.replace(tzinfo=datetime.timezone.utc).isoformat().replace(\"+00:00\", \"Z\")", "def isoformat(dt):\n return dt.isoformat().replace(\"+00:00\", \"Z\")", "def format_iso(dt, default_tzinfo=local_timezone):\n dt = dt if dt.tzinfo else dt.replace(tzinfo=default_tzinfo)\n return dt.astimezone(utc_timezone).replace(tzinfo=None).isoformat()+'Z'", "def iso_date(self, t=None):\n if t is None:\n t = time.time()\n time_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(t))\n\n return time_str", "def convert_from_iso(s):\n # TODO: Allow for more timezones than just -6 GMT\n return datetime.datetime.strptime(s, \"%Y-%m-%dT%H:%M:%S-06:00\")", "def to_iso(dt):\n return dt.strftime(ISO_FORMAT)", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()", "def date_string_to_iso(string):\n date=None\n if string is not None:\n try:\n #if separator is \"-\"\n if \"-\" in string:\n strings=string.split(\"-\")\n else:\n strings=string.split(\"/\")\n\n #~ print \"strings\"\n #~ print strings\n \n #if year is first\n if 
len(strings[0])==4:\n year, month, day=strings[0], strings[1], strings[2]\n #if year is last\n else:\n #the year must be coded on 4 digits\n year, month, day=strings[2], strings[1], strings[0]\n date=date_split_to_iso(year, month, day)\n except Exception, e:\n print \"pb\", string\n print \"wrong date format\", e\n\n #return None if date string is None\n return date", "def date_to_str(obj: \"date\") -> str:\n return obj.isoformat()", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime('%I:%M %p %A %d %B %Y')", "def date_to_iso8601(date):\n dateTimeStr = date.strftime('%Y-%m-%dT%H:%M:%S')\n timeZone_Sign = date.strftime('%z')[0:1]\n timeZone_Str = '%s:%s' % (\n date.strftime('%z')[1:3], date.strftime('%z')[3:5]\n )\n return '{dateTimeStr}{tzsign}{timezone}'.format(\n dateTimeStr=dateTimeStr,\n tzsign=timeZone_Sign,\n timezone=timeZone_Str\n ).replace(':', '%3A').replace('+', '%2B')", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime(\"%A %d %B %Y\")", "def format_datestr(v):\n return v.isoformat() + 'Z'", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime('%A %d %B %Y')", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime('%A %d %B %Y')", "def stamp2iso(string):\n return str(datetime.fromtimestamp(int(string)).strftime(\"%Y-%m-%dT%H:%M:%S\"))", "def toisostring(dt):\n return dt.format(ISOFORMAT) + 'Z'", "def parse_iso8601(self, date_str, tz=None):\n date = iso8601.parse_date(date_str, default_timezone=None)\n if date.tzinfo:\n return date\n else:\n local_tz = pytz.timezone(tz) if tz else pytz.timezone(\n self.default_tz)\n return local_tz.localize(date)", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def _convert_to_isoformat(date_time):\n if not date_time:\n return None\n if date_time[-1] == \"Z\":\n delta = 0\n timestamp = date_time[:-1]\n else:\n timestamp = date_time[:-6]\n sign, offset = date_time[-6], date_time[-5:]\n delta = int(sign + offset[:1]) * 60 + int(sign + offset[-2:])\n\n check_decimal = timestamp.split(\".\")\n if len(check_decimal) > 1:\n decimal_str = \"\"\n for digit in check_decimal[1]:\n if digit.isdigit():\n decimal_str += digit\n else:\n break\n if len(decimal_str) > 6:\n timestamp = timestamp.replace(decimal_str, decimal_str[0:6])\n\n if delta == 0:\n tzinfo = TZ_UTC\n else:\n tzinfo = timezone(datetime.timedelta(minutes=delta))\n\n try:\n deserialized = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n deserialized = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S\")\n\n deserialized = deserialized.replace(tzinfo=tzinfo)\n return deserialized", "def to_iso_datetime(value: Union[datetime.datetime, datetime.time]) -> str:\n retval = value.isoformat()\n if value.tzinfo is None:\n retval += 'Z'\n else:\n # replace +00:00 timezone with Z\n retval = re.sub('[+-]00:00$', 'Z', retval)\n return retval", "def format_date(d):\n if type(d) == str:\n d = dateutil_parse(d)\n return d.isoformat()", "def american_date_to_iso(connection):\n _update_date_by_regexp(connection=connection,\n regexp=\"^[0-9]{2}/[0-9]{2}/[0-9]{4}$\",\n new_value=\"\"\"CONCAT_WS('-',\n SUBSTR(cav.attribute_value, 7, 4),\n SUBSTR(cav.attribute_value, 
1, 2),\n SUBSTR(cav.attribute_value, 4, 2))\n \"\"\")", "def to_isoformat(self) -> str:\n return self.isoformat()", "def format_iso_date(date, night_date=True):\n if isinstance(date, str):\n date = Time(date, format=\"fits\").datetime\n elif isinstance(date, datetime):\n date = Time(date, format=\"datetime\").datetime\n\n if night_date:\n return (\n date - timedelta(hours=15)\n ).date() # If obs goes up to 15pm it still belongs to day before\n else:\n return date", "def convert_datetime_to_iso(datetime_obj):\r\n return Date().to_json(datetime_obj)", "def normalise_date(text):\n text = text.replace('/', '-')\n text = text.replace(':', '-')\n text = text.replace('.', '-')\n text = text.replace('@', '-')\n return text", "def _external_time_format(int_time):\n simple_iso_time = True\n if simple_iso_time:\n ext_time = int_time.replace(tzinfo=SimpleUtc()).isoformat()\n else:\n ext_time = int_time.isoformat() + \"Z\"\n return ext_time", "def convert_date(self, date_str):\n\t\tdate_obj = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')\n\t\tdate_obj = date_obj.replace(tzinfo=pytz.timezone('UTC'))\n\t\treturn date_obj.astimezone(pytz.timezone(self.time_zone))", "def formatIsoDate(dt, tm):\n \n iso = ''\n if dt != '':\n match = re.search(r'^(\\d{2})/(\\d{2})/(\\d{4})$', dt)\n if not match:\n raise RuntimeError(\"Failed to extract date from %s.\" % dt)\n iso += \"%s-%s-%sT\" % (match.group(3), match.group(1), match.group(2))\n if tm != '':\n match = re.search(r'^(\\d{2}):(\\d{2})', tm)\n if not match:\n raise RuntimeError(\"Failed to extract time from %s.\" % tm)\n iso += \"%s:%s:00Z\" % match.groups()\n else: iso += \"00:00:00Z\"\n return iso", "def serialize_date(dt):\n if dt.tzinfo:\n dt = dt.astimezone(UTC).replace(tzinfo=None)\n return dt.isoformat()", "def format_datetime(self, datetime):\n return datetime.isoformat()", "def get_python_date(self):\n return dateutil.parser.parse(self.iso_date)", "def ISO8601(self):\n if self.timezoneNaive():\n return \"%0.4d-%0.2d-%0.2dT%0.2d:%0.2d:%0.2d\" % (\n self._year, self._month, self._day,\n self._hour, self._minute, self._second)\n tzoffset = _tzoffset2iso8601zone(_tzoffset(self._tz, self._t))\n return \"%0.4d-%0.2d-%0.2dT%0.2d:%0.2d:%0.2d%s\" % (\n self._year, self._month, self._day,\n self._hour, self._minute, self._second, tzoffset)", "def from_iso(date_string: str, tz_info: tzinfo = UTC) -> datetime:\n date_string = date_string.replace(\"Z\", \"+00:00\")\n dt = datetime.fromisoformat(date_string)\n try:\n return add_timezone(dt, tz_info)\n except ValueError:\n return convert_timezone(dt, tz_info)", "def datetime_to_iso8601(date_time):\n assert not date_time.utcoffset()\n return date_time.strftime('%Y-%m-%dT%H:%M:%S.%f') + 'Z'", "def format_iso_now():\n return datetime.datetime.utcnow().isoformat()+'Z'", "def fromisoformat(string):\n string = string.replace(\"T\", \" \")\n if \".\" in string:\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S.%f\")\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S\")", "def default(self, obj): # pylint: disable=method-hidden\r\n\r\n if isinstance(obj, datetime):\r\n if obj.tzinfo is None:\r\n # Localize to UTC naive datetime objects\r\n obj = UTC.localize(obj)\r\n else:\r\n # Convert to UTC datetime objects from other timezones\r\n obj = obj.astimezone(UTC)\r\n return obj.isoformat()\r\n elif isinstance(obj, date):\r\n return obj.isoformat()\r\n\r\n return super(DateTimeJSONEncoder, self).default(obj)", "def cleanVitalsDate(date_str):\n if date_str[-1] != 'Z':\n date_str += 'Z'\n return 
date_str.replace(' ', 'T')", "def _format_date(input_date, day_flag, sep_char=\"-\"):\n date_iso = input_date[6:10] + sep_char + input_date[0:2]\n if day_flag:\n date_iso = date_iso + sep_char + input_date[3:5]\n return date_iso", "def iso8601_date(ts=None):\n ts = _get_gmtime_compatible_timestamp(ts)\n return _time.strftime('%Y-%m-%dT%H:%M:%SZ', _time.gmtime(ts))", "async def test_process_timestamp_to_utc_isoformat() -> None:\n datetime_with_tzinfo = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC)\n datetime_without_tzinfo = datetime(2016, 7, 9, 11, 0, 0)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n nst = dt_util.get_time_zone(\"Canada/Newfoundland\")\n datetime_nst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=nst)\n hst = dt_util.get_time_zone(\"US/Hawaii\")\n datetime_hst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=hst)\n\n assert (\n process_timestamp_to_utc_isoformat(datetime_with_tzinfo)\n == \"2016-07-09T11:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_without_tzinfo)\n == \"2016-07-09T11:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_est_timezone)\n == \"2016-07-09T15:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_nst_timezone)\n == \"2016-07-09T13:30:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_hst_timezone)\n == \"2016-07-09T21:00:00+00:00\"\n )\n assert process_timestamp_to_utc_isoformat(None) is None", "def format_js_iso(date):\n return datetime.datetime.strftime(date, '%Y-%m-%dT%H:%M:%S.{0}Z').format(int(round(date.microsecond / 1000.0)))", "def date_to_utc_string(filedate):\n return filedate.isoformat()", "def format_iso8601(obj):\n return obj.strftime('%Y-%m-%dT%H:%M:%SZ')", "def iso8601_to_date(iso_date):\n return parse_datetime(iso_date + 'T00:00:00').date()", "def isoformat_now():\n return datetime_isoformat(datetime.datetime.utcnow())", "def _serialize_date(val):\n return date_to_iso8601(val)", "def date_to_iso8601(date):\n return '%s-%02d-%02d' % (date.year, date.month, date.day)", "def iso_from_string(inp):\n try: r=inp.split(\".\")[0]\n except: r=inp\n fmt=\"%Y-%m-%dT%H:%M:%S\"\n return datetime.strptime(r, fmt)", "def get_isodate(date_str):\n iso_date = None\n\n if not date_str:\n return None\n\n #first, is it already a valid isodate?\n try:\n isodate.parse_date(date_str)\n return date_str\n except isodate.ISO8601Error, e:\n # if not, try to parse it\n try:\n iso_date = isodate.date_isoformat(parse(date_str))\n except Exception, e:\n log.msg(e.message, level=log.WARNING)\n return None\n\n return iso_date", "def datetime_from(text):\n eastern = pytz.timezone(\"US/Eastern\")\n if text.endswith(\"T00:00:00\"):\n text = text[:-len(\"T00:00:00\")]\n time = datetime.strptime(text, \"%Y-%m-%d\")\n time = time.replace(hour=23, minute=59, second=59)\n time = eastern.localize(time)\n return time.astimezone(pytz.utc)", "def for_json(self) -> str:\n return self.isoformat()", "def test_gen_iso_datetime_str(self):\n\n est = pytz.timezone(\"EST\")\n some_date = datetime.datetime(\n year=1985, month=11, day=15,\n hour=6, minute=0,\n tzinfo=est)\n\n # Generate an ISO datetime string, and parse it. 
This will convert it\n # from EST to UTC.\n parsed_dtime = parse_datetime(gen_iso_datetime_str(some_date))\n # EST is -5, so the hour should now be 11.\n self.assertEqual(parsed_dtime.hour, 11)\n # tzinfo will be UTC, since we converted it upon parsing.\n self.assertIs(parsed_dtime.tzinfo, UTC_TZINFO)", "def timestamp_to_iso_format(timestamp):\n if timestamp is None:\n return None\n return datetime.isoformat(datetime.utcfromtimestamp(int(timestamp)))", "def _parse_ISO8601_date(date: str) -> datetime.datetime:\n try:\n dt = parser.isoparse(date)\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n return dt\n return dt.astimezone()\n except ValueError:\n raise ValidationError(\n \"Expecting date in ISO8601 format, eg. 2018-08-01T00:00:00Z, \"\n f\"gets {date} instead.\"\n )", "def formatISODT(dt):\n\tif dt is None:\n\t\treturn None\n\treturn dt.replace(microsecond=0, tzinfo=None).isoformat()+\"Z\"", "def from_iso(dt_str):\n return datetime.datetime.strptime(dt_str, ISO_FORMAT)", "def datetimetostr(dt):\n if dt.utcoffset() is not None:\n return dt.isoformat()\n else:\n return \"%sZ\" % dt.isoformat()", "def datetime_to_str(obj: \"datetime\") -> str:\n if obj.tzinfo is not None and obj.tzinfo.utcoffset(obj) is not None:\n # aware time; translate to UTC\n obj = obj.astimezone(timezone.utc)\n obj = obj.replace(tzinfo=None)\n return obj.isoformat() + \"Z\"", "def dt_to_str(dt):\n return dt.isoformat()", "def default(o):\n if isinstance(o, (datetime.date, datetime.datetime)):\n return o.isoformat()", "def default(self, obj):\n if isinstance(obj, (dt.date, dt.datetime)):\n return obj.isoformat()", "def _to_report_datetime(date_time: str, include_time: bool = True, expiry: bool = False):\n if len(date_time) < 10: # Legacy may be empty string.\n return date_time\n if len(date_time) == 10: # Legacy has some date only data.\n report_date = model_utils.date_from_iso_format(date_time)\n return report_date.strftime('%B %-d, %Y')\n zone = date_time[20:]\n local_datetime = None\n if not zone.endswith('00'): # Coming from legacy, already local so ignore timezone adjustment.\n local_datetime = model_utils.ts_from_iso_format_local(date_time)\n # current_app.logger.info(f'zone={zone} date_time={date_time}')\n else:\n local_datetime = model_utils.to_local_timestamp(model_utils.ts_from_iso_format(date_time))\n if include_time:\n timestamp = local_datetime.strftime('%B %-d, %Y at %-I:%M:%S %p Pacific time')\n if timestamp.find(' AM ') > 0:\n return timestamp.replace(' AM ', ' am ')\n return timestamp.replace(' PM ', ' pm ')\n\n return local_datetime.strftime('%B %-d, %Y')", "def parse_iso_date(value):\n try:\n datetime.strptime(value, \"%Y-%m-%d\")\n return value\n except Exception:\n return None", "def rfc3339date(date):\n if not date: return ''\n date = date + datetime.timedelta(seconds=-time.timezone)\n if time.daylight:\n date += datetime.timedelta(seconds=time.altzone)\n return date.strftime('%Y-%m-%dT%H:%M:%SZ')", "def date_to_str(date):\r\n return datetime.strftime(date,'%Y-%m-%dT%H:%M:%S.%fZ')", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def get_formatted_date(self, date):\n\n formatted_date = date\n\n possible_datetime_formats = [\n \"%Y-%m-%dT%H:%M:%S%z\", # \"2021-10-19T16:46:02Z\"\n \"%a, %d %b %Y %H:%M:%S %z\", # \"Tue, 19 Oct 2021 21:00:13 +0300\"\n \"%a, %d %b %Y %H:%M:%S %Z\", # \"Tue, 19 Oct 2021 18:54:00 GMT\"\n \"%a, %d %b %Y %H:%M:%S\", # \"Tue, 19 Oct 2021 18:54:00\"\n ]\n\n for format in 
possible_datetime_formats:\n try:\n formatted_date = datetime.strptime(date, format).strftime(\"%Y%m%d\")\n except:\n pass\n return formatted_date", "def format_date_iso(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y-%m-%d')", "def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()", "def serialize_iso(attr):\n if not attr:\n return None\n if isinstance(attr, str):\n attr = isodate.parse_datetime(attr)\n try:\n utc = attr.utctimetuple()\n if utc.tm_year > 9999 or utc.tm_year < 1:\n raise OverflowError(\"Hit max or min date\")\n\n date = \"{:04}-{:02}-{:02}T{:02}:{:02}:{:02}\".format(\n utc.tm_year, utc.tm_mon, utc.tm_mday,\n utc.tm_hour, utc.tm_min, utc.tm_sec)\n return date + 'Z'\n except (ValueError, OverflowError) as err:\n msg = \"Unable to serialize datetime object.\"\n raise_with_traceback(ValueError, msg, err)\n except AttributeError as err:\n msg = \"ISO-8601 object must be valid Datetime object.\"\n raise_with_traceback(TypeError, msg, err)", "def date_split_to_iso(year, month, day):\n year=int(year)\n month=int(month)\n day=int(day)\n if month>12:\n #month and day are reversed\n temp=month\n month=day\n day=temp\n return date(year, month, day).isoformat()", "def date_json_to_py(iso_date):\n\n return date.fromisoformat(iso_date)", "def get_date(self, date_str, timezone=None):\n if timezone is None:\n timezone = self.tz_local\n\n dt = dateutil.parser.parse(date_str)\n date = dt.replace(tzinfo=timezone)\n return date", "def isoformat(self):\n return \"%04d-%02d-%02d\" % (self._year, self._month, self._day)", "def nowISO():\n return dt2ISO(datetime.datetime.utcnow())", "def isoformat(self):\n return \"\"", "def parse_isodate(iso_date):\n date = None\n\n try:\n date = isodate.parse_date(iso_date)\n except Exception, e:\n log.msg(e.message, level=log.WARNING)\n\n return date", "def main():\n date_time_conversion('2018-12-30T09:37:56.000001Z', '2020-07-12T07:56:43.000001Z', 0, 0, 0, 0)", "def _parse_date(s):\n return parse(s).astimezone(pytz.utc)", "def ts_datetime(val):\n return val.isoformat() + \"Z\"", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def nepalinow(format=\"%B %d, %Y, %A\"):\n\treturn to_nepali_datetime(timezone.now()).strftime(format)", "def isoformat(self):\n s = '{0:04}'.format(self._year)\n if self._month:\n s += '-{0:02}'.format(self._month)\n if self._day:\n s += '-{0:02}'.format(self._day)\n return s", "def date2text(date=None):\n return date.strftime('%-d %B %Y')", "def iso8601(val):\n dt = datetime.datetime.utcfromtimestamp(int(int(val)/1000))\n return pytz.UTC.localize(dt).isoformat()", "def test_date_time_formatter():\n # given\n utc_dt = datetime.utcnow()\n expected_datetime_string = \"2015-10-21T05:29:00\"\n actual_datetime_string = utils.format_utc_to_local_tz(utc_dt)\n\n # then\n assert actual_datetime_string == expected_datetime_string", "def extract_date_from_iso_time(t: str) -> typing.Optional[datetime.date]:\n if not t:\n return None\n\n date, _ = t.split('T')\n return datetime.date.fromisoformat(date)", "def from_isoformat(cls, date_string: str) -> Date:\n return cls.fromisoformat(date_string)", "def iso_date(self):\n return self.strftime(self.FORMAT_PRECISION_DAY)", "def parse_datetime(iso):\n iso = iso.replace(',', '.')\n return 
iso8601.parse_date(iso).astimezone(pytz.utc).replace(tzinfo=None)", "def date2str(datetime_object):\n if datetime_object is None:\n return 'None'\n return datetime_object.strftime('%Y-%m-%dT%H:%M:%S.%f')[0:-3]" ]
[ "0.6905238", "0.65572876", "0.6553589", "0.64506024", "0.64370036", "0.6369056", "0.6347456", "0.62363726", "0.6200606", "0.6184233", "0.6160375", "0.6148745", "0.61423504", "0.6141841", "0.6125747", "0.60974556", "0.606667", "0.60619223", "0.6043991", "0.60352457", "0.60352457", "0.598807", "0.59829974", "0.5967835", "0.59618247", "0.5956251", "0.5947979", "0.5935931", "0.59106636", "0.5909712", "0.5895294", "0.5880845", "0.58733755", "0.5871624", "0.5832768", "0.57990295", "0.5796441", "0.5791963", "0.57781595", "0.5773086", "0.57616055", "0.57499146", "0.5736913", "0.5731594", "0.5725245", "0.5718129", "0.5709186", "0.5699445", "0.5674318", "0.56705195", "0.5634854", "0.5602943", "0.56005484", "0.55599934", "0.55586994", "0.5515937", "0.5508633", "0.5496273", "0.54910654", "0.5485196", "0.54562324", "0.5453201", "0.54512846", "0.5428225", "0.5416751", "0.54136664", "0.53992426", "0.5387871", "0.5386173", "0.53817964", "0.53794026", "0.53737247", "0.53643423", "0.5363442", "0.5339125", "0.53359616", "0.533515", "0.5328569", "0.53274995", "0.5326872", "0.5321316", "0.53112996", "0.5307484", "0.53037167", "0.5302052", "0.5301736", "0.5286995", "0.52836263", "0.52797425", "0.527697", "0.52706826", "0.52618796", "0.52586824", "0.52509934", "0.5250163", "0.5247191", "0.52428275", "0.5242155", "0.52380985", "0.5236901" ]
0.6582443
1
ISO date + 1 day (used in query to Google calendar)
def next_day(isotext): as_arrow = arrow.get(isotext) return as_arrow.replace(days=+1).isoformat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iso_date(self):\n return self.strftime(self.FORMAT_PRECISION_DAY)", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def convertSODate(datenum):\n #Date numbers seem to start with 0 = 2001-01-01\n base_date = datetime.date(2001, 1, 1)\n #add key from the spot on object to this base date to get the date\n record_date = base_date + datetime.timedelta(days=int(datenum))\n record_date = record_date.isoformat()\n return record_date", "def get_date():\n\n return datetime.datetime.utcnow().isoformat()", "def get_date(date):\n return date", "def get_date(self):\n raise Unimplemented()", "def date(self):\n try:\n return datetime.date.fromordinal(self.round)\n except ValueError:\n raise ValueError(\"you need to run ABCE in calendar mode, use simulation.declare_calendar(2000, 1, 1)\")", "def plastic_date():\n return 'Zun, 99 Zun 9999 99:61:61'", "def get_date():\n return datetime(2000, 1, 1, 0, 0, 0, FLOOD_TIMEOUT+1)", "def nowISO():\n return dt2ISO(datetime.datetime.utcnow())", "def qToday():\n \n return _qDate.todaysDate().ISO()", "def now_iso():\n return to_iso(now())", "def get_gds_current_date(self, remove_leading_zero='true'):\r\n time_now = datetime.datetime.now().time()\r\n today_2pm = time_now.replace(hour=14, minute=31, second=0, microsecond=0)\r\n if time_now < today_2pm:\r\n gds_date = datetime.datetime.now() - datetime.timedelta(days=int(1))\r\n else:\r\n gds_date = datetime.datetime.now()\r\n\r\n if remove_leading_zero.lower() == 'true':\r\n return str('{dt.day}{dt:%b}'.format(dt=gds_date).upper())\r\n else:\r\n return self._set_gds_date_format(gds_date)", "def date(self):", "def get_date():\n\n return tz.get_brisbane_time().date()", "def format_date_iso(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y-%m-%d')", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def date_handler(end):\n calc_date = datetime.strptime(str(end), \"%Y-%m-%d\")\n shift = timedelta(8)\n new_date = calc_date + shift\n\n return new_date.date()", "def get_date(self):\n return self.date.strftime(\"%a %x\")", "def _get_date():\n return datetime.datetime.now()", "def get_date(self):\n return datetime.date(\n int(self.kwargs['year']),\n int(self.kwargs['month']),\n int(self.kwargs['day'])\n )", "def _date(self) -> datetime:\n return self.__date", "def _today() -> datetime.date:\n return datetime.today().date()", "def published_date(subtract_days=0):\n pub_date = datetime.datetime.today()\n pub_date = pub_date - datetime.timedelta(days=subtract_days)\n return datetime.datetime.strftime(pub_date, \"%Y-%m-%dT%H:%M:%SZ\")", "def get_python_date(self):\n return dateutil.parser.parse(self.iso_date)", "def starting_date(self):\n return datetime.date(2016, 1, 4)", "def get_today() -> datetime.date:\n return datetime.date.today()", "def as_ical(self):\n if self.date_is_approximate:\n return None\n\n ymd = (self.date.year, self.date.month, self.date.day)\n event_date = date(*ymd)\n event = icalendar.Event()\n event.add(\"dtstart\", event_date)\n event.add(\"dtend\", event_date + timedelta(days=1))\n event.add(\"uid\", self.ical_uid)\n event.add(\"summary\", \"Django Girls %s\" % self.city)\n event.add(\"location\", f\"{self.country}, {self.city}\")\n return event", "def xlDateISO(xdate):\n # QuantLib doesn't support dates prior to 1901\n # which saves us from dealing with the leap year problem\n if xdate < 367:\n return \"#Date prior 
to 1901-01-01\"\n \n # python dates are from year zero, excel from 1900\n return date.fromordinal(693594 + int(xdate)).isoformat()", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "def get_start_date(self):\n return \"%d%02d\" % (self.year, self.term)", "def convertFromISODate(date):\n if date:\n try:\n datetime_object = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n return date\n else:\n return datetime_object.strftime('%Y-%m-%d')\n else:\n return None", "def date(self) -> Optional[int]:\n return pulumi.get(self, \"date\")", "def date(self):\n return self.date_value", "def __get_settlement_date():\n day_after_tomorrow = datetime.now(timezone.utc).date() + \\\n timedelta(days=2)\n settlement_date = day_after_tomorrow.strftime(\"%Y%m%d\")\n\n return settlement_date", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def date(self):\n return date(self._year, self._month, self._day)", "def get_date():\n return datetime.datetime.now()", "def date(self):\n return self._date", "def date(self):\n # type: () -> date\n return self._date", "def fake_date_without_day(value):\n return date(year=value[0], month=value[1], day=1)", "def todate(self):\n return self._date", "def today(self):\n return(datetime.date.today().isoformat())", "def init_date( self ) -> datetime:\n return datetime( 2011 ,2 ,1 )", "def get_pub_date():\n return datetime.datetime.now()", "def get_date(self, datetime):\n return datetime.date()", "def default_date(self):\n return datetime.datetime.now().strftime('%Y-%m-%d')", "async def date(self) -> dt.date:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).date()", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def sas_date_converter(row, base_date='1960-01-01'):\n if row is None:\n return row\n return datetime.strptime(base_date, '%Y-%m-%d') + timedelta(int(row))", "def date(self):\n return self._date", "def date(self):\n return self._date", "def date(self):\n return self._date", "def get_date(self, ord):\n if 0 <= ord < self.days_count:\n return self.start + timedelta(days=ord)\n else:\n raise IndexError()", "def format_iso_date(date, night_date=True):\n if isinstance(date, str):\n date = Time(date, format=\"fits\").datetime\n elif isinstance(date, datetime):\n date = Time(date, format=\"datetime\").datetime\n\n if night_date:\n return (\n date - timedelta(hours=15)\n ).date() # If obs goes up to 15pm it still belongs to day before\n else:\n return date", "def to_stdlib(self) -> dt.date:\n return dt.date.fromordinal(self.toordinal())", "def date_to_str(obj: \"date\") -> str:\n return obj.isoformat()", "def easter_date(y):\r\n return dateutil.easter.easter(int(y)).strftime('%Y%m%d')", "def rfc3339date(date):\n if not date: return ''\n date = date + datetime.timedelta(seconds=-time.timezone)\n if time.daylight:\n date += datetime.timedelta(seconds=time.altzone)\n return date.strftime('%Y-%m-%dT%H:%M:%SZ')", "def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")", "def date_added(self) -> str:\n return 
self._date_added.strftime('%Y-%m-%d')", "def ts_datetime(val):\n return val.isoformat() + \"Z\"", "def format_datestr(v):\n return v.isoformat() + 'Z'", "def get_datecode():\n now = datetime.utcnow()\n return now.strftime(\"%Y%m%d\")", "def add_gigasecond(birth_date):\n\n # this line makes me appreciating JS implementation\n birth_timestamp = calendar.timegm(birth_date.timetuple())\n after_timestamp = birth_timestamp + 10 ** 9\n\n return datetime.utcfromtimestamp(after_timestamp)", "def getActiveDate(self):\n dateAsQDate = self.workCalendar.selectedDate()\n dateString = str(dateAsQDate.toString('dd/MM/yyyy'))\n dateAsDateTime = OINKM.getDate(dateString)\n return dateAsDateTime", "def date(self) -> datetime.datetime:\n return self._data['Date'] - datetime.timedelta(0, float(self.exposuretime), 0)", "def test_2_default_start_date(self):\n date = FeaturedCommunity.query.get(2).start_date.date()\n self.assertEqual(date, datetime.date.today())", "def case_event_date_day_represent(value):\n\n return S3DateTime.date_represent(value, utc=True)", "def format_js_iso(date):\n return datetime.datetime.strftime(date, '%Y-%m-%dT%H:%M:%S.{0}Z').format(int(round(date.microsecond / 1000.0)))", "def get_oldest_article_date():\n\n # date = datetime.datetime.strptime(date, \"%m/%d/%Y\")\n today_date = datetime.date.today()\n last_week = today_date-timedelta(days=2)\n search_date = last_week.isoformat()\n\n return search_date", "def format_iso_now():\n return datetime.datetime.utcnow().isoformat()+'Z'", "def date_added(self):\n return datetime.datetime.fromtimestamp(self.fields['addedDate'])", "def getDate(self): # real signature unknown; restored from __doc__\r\n pass", "def date_now_plus_year():\n return (datetime.date.today() + datetime.timedelta(days=365))", "def filter_simple_date(value: datetime) -> str:\n return value.strftime(\"%Y-%m-%d\")", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def get_date(self):\n return self.individual_session.session_date.strftime('%A')", "def date(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"date\")", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def TODAY():\n return datetime.date.today()", "def date(self):\n bc = self.barcamp\n if bc.start_date and bc.end_date:\n # TODO: localize it\n return \"%s - %s\" %(\n bc.start_date.strftime('%d.%m.%Y'),\n bc.end_date.strftime('%d.%m.%Y'))\n else:\n return self.handler._(\"date to be announced\")", "def getValue(self):\n return qDate2Date(self.field.date())", "def get_crpp_date(dtThis):\n\n # Model: yyyy-MM-dd'T'HH:mm:ss\n sDate = dtThis.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return sDate", "def get_crpp_date(dtThis):\n\n # Model: yyyy-MM-dd'T'HH:mm:ss\n sDate = dtThis.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return sDate", "def to_date(self):\n return self._to_date", "def test_startdate(self):\n req = create_request(query_string={'dates': '7d'})\n eq_(startdate(req), date.today() - timedelta(days=7))\n\n req = create_request(query_string={'dates': 'today'})\n eq_(startdate(req), date.today())\n\n req = create_request(query_string={'day': '2012-05-24'})\n eq_(startdate(req), datetime(2012, 5, 24))\n\n req = create_request(query_string={'week': '2012-05-24'})\n eq_(startdate(req), datetime(2012, 5, 21))\n\n req = create_request(query_string={'day': 'today'})\n eq_(startdate(req), None)\n\n req = create_request()\n eq_(startdate(req), None)", "def get_date():\n dt = datetime.now()\n return 
dt.strftime(\"%Y-%m-%d\")", "def date_to_iso8601(date):\n return '%s-%02d-%02d' % (date.year, date.month, date.day)", "def get_due_date(self):\n return self.created_at + self.urgency_level.duration", "def nepalinow(format=\"%B %d, %Y, %A\"):\n\treturn to_nepali_datetime(timezone.now()).strftime(format)", "def hydrate_date(days):\n return Date.from_ordinal(unix_epoch_date_ordinal + days)", "def ship_date(self):\n return self.created.date()", "def setEvaluationDate(cell):\n global _qToday\n \n _qToday = toDate(cell.value)\n if not to_date:\n _qToday = Settings.instance().getEvaluationDate()\n else:\n Settings.instance().setEvaluationDate(_qToday)\n \n return _qToday.ISO()", "def __calculate_date_recorded(self, upload_date_str):\n\n upload_date = datetime.date(\n int(upload_date_str[0:4]),\n int(upload_date_str[4:6]), int(upload_date_str[6:8]))\n if self.event.know_date:\n if not (self.event.date_begin <= upload_date <=\n self.event.date_end):\n return self.event.date_default.isoformat()\n\n return upload_date.isoformat()" ]
[ "0.6720962", "0.6390619", "0.6365043", "0.62321824", "0.62134486", "0.6139947", "0.61357707", "0.6097243", "0.60584015", "0.60497934", "0.6018173", "0.60177577", "0.6013842", "0.6012742", "0.5981008", "0.5978285", "0.59668523", "0.59668523", "0.59668523", "0.59668523", "0.5959705", "0.59519523", "0.5933397", "0.5921298", "0.5916463", "0.5884206", "0.58833414", "0.5876679", "0.5849903", "0.58490884", "0.58175427", "0.5793324", "0.578275", "0.57787025", "0.5761371", "0.57595485", "0.57552487", "0.5744373", "0.5741845", "0.57360184", "0.5734828", "0.5728703", "0.5727972", "0.5727279", "0.57245547", "0.5719726", "0.57174516", "0.57164663", "0.56901145", "0.56784344", "0.56763446", "0.5675159", "0.5663545", "0.56612504", "0.56612504", "0.56612504", "0.5659394", "0.56540835", "0.5648052", "0.563791", "0.56352454", "0.5634951", "0.562706", "0.56229603", "0.5611533", "0.56086195", "0.56081486", "0.5591875", "0.5580657", "0.5574647", "0.55650735", "0.55489093", "0.5548475", "0.554581", "0.5543891", "0.5542431", "0.5534968", "0.55322844", "0.5526285", "0.55251646", "0.55232614", "0.5518256", "0.55178577", "0.55178577", "0.55156356", "0.5513209", "0.5509484", "0.5508946", "0.5508946", "0.5494509", "0.54903156", "0.5489399", "0.54859036", "0.5483881", "0.54784876", "0.54769254", "0.54741764", "0.54723364", "0.54711914" ]
0.5598111
68
Given a google 'service' object, return a list of calendars. Each calendar is represented by a dict. The returned list is sorted to have the primary calendar first, and selected (that is, displayed in Google Calendars web app) calendars before unselected calendars.
def list_calendars(service): app.logger.debug("Entering list_calendars") calendar_list = service.calendarList().list().execute()["items"] result = [ ] for cal in calendar_list: kind = cal["kind"] id = cal["id"] if "description" in cal: desc = cal["description"] else: desc = "(no description)" summary = cal["summary"] # Optional binary attributes with False as default selected = ("selected" in cal) and cal["selected"] primary = ("primary" in cal) and cal["primary"] result.append( { "kind": kind, "id": id, "summary": summary, "selected": selected, "primary": primary }) return sorted(result, key=cal_sort_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_calendars(service):\n app.logger.debug(\"Entering list_calendars with service\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n app.logger.debug(\"Got calendar list\")\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal:\n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n\n result.append(\n {\"kind\": kind, \"id\": id, \"summary\": summary, \"selected\": selected,\n \"primary\": primary})\n app.logger.debug(\"About to return from list_calendars with: \", result)\n return sorted(result, key=cal_sort_key)", "def getUsrCals(self, service):\n return self.service.calendarList().list().execute()", "def func_calendar_list():\r\n creds = None\r\n global page_token\r\n #global new_calendar_list=[]\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\r\n new_calendar_list = []\r\n for calendar_list_entry in calendar_list['items']:\r\n new_calendar_list.append(calendar_list_entry['summary'])\r\n page_token = calendar_list.get('nextPageToken')\r\n return (new_calendar_list)", "def calendars(self):\r\n return c.Calendars(self)", "def calendars(self):\r\n return c.Calendars(self)", "def get_calendar(gtfs_info):\n # Parse calendar\n use_cols = ['service_id', 'weekdays', 'start_date', 'end_date']\n calendar = gtfs_info.drop_duplicates(subset=use_cols)\n calendar = calendar[use_cols].copy()\n calendar = calendar.reset_index(drop=True)\n\n # Container for final results\n gtfs_calendar = pd.DataFrame()\n\n # Parse weekday columns\n for idx, row in calendar.iterrows():\n # Get dayinfo\n dayinfo = row['weekdays']\n\n # Parse day information\n dayrow = parse_day_range(dayinfo)\n\n # Add service and operation range info\n dayrow['service_id'] = row['service_id']\n dayrow['start_date'] = row['start_date']\n dayrow['end_date'] = row['end_date']\n\n # Add to container\n gtfs_calendar = gtfs_calendar.append(dayrow, ignore_index=True, sort=False)\n\n # Fix column order\n col_order = ['service_id', 'monday', 'tuesday', 'wednesday',\n 'thursday', 'friday', 'saturday', 'sunday',\n 'start_date', 'end_date']\n gtfs_calendar = gtfs_calendar[col_order].copy()\n\n # Ensure correct datatypes\n int_types = ['monday', 'tuesday', 'wednesday',\n 'thursday', 'friday', 'saturday', 'sunday']\n for col in int_types:\n gtfs_calendar[col] = gtfs_calendar[col].astype(int)\n\n return gtfs_calendar", "def calendar_choices(self):\n if not self._calendars:\n if self.authenticated:\n default = 
self.account.schedule().get_default_calendar()\n # {\n # \"default\" : <DEFAULT_CALENDAR>,\n # \"<CALENDAR_NAME>: <CALENDAR>,\n # ...\n # }\n self._calendars = {\n DEFAULT_CALENDAR: default,\n **{\n c.name: c\n for c in self.account.schedule().list_calendars() if c.name != default.name\n }\n }\n\n return self._calendars", "def list_calendars(self, limit=None, *, query=None, order_by=None):\n url = self.build_url(self._endpoints.get('root_calendars'))\n\n params = {}\n if limit:\n params['$top'] = limit\n if query:\n params['$filter'] = str(query)\n if order_by:\n params['$orderby'] = order_by\n\n response = self.con.get(url, params=params or None)\n if not response:\n return []\n\n data = response.json()\n\n # Everything received from cloud must be passed as self._cloud_data_key\n contacts = [self.calendar_constructor(parent=self, **{\n self._cloud_data_key: x}) for x in data.get('value', [])]\n\n return contacts", "def calendars(self):\n return self.calendar_home_set.calendars()", "def calendar_list(self, calendar_id):\r\n return CalendarList(self, calendar_id)", "def calendars(self):\n cals = []\n\n data = self.children(cdav.Calendar.tag)\n for c_url, c_type, c_name in data:\n try:\n cal_id = c_url.split(\"/\")[-2]\n except:\n log.error(f\"Calendar {c_name} has unexpected url {c_url}\")\n cal_id = None\n cals.append(\n Calendar(self.client, id=cal_id, url=c_url, parent=self, name=c_name)\n )\n\n return cals", "def get_events():\n\n all_calendar_events = {}\n\n # Suppress warning in logs\n # https://github.com/googleapis/google-api-python-client/issues/299\n service = build('calendar', 'v3', credentials=google_auth.creds, cache_discovery=False)\n\n now = datetime.datetime.utcnow().today().isoformat() + 'Z' # 'Z' indicates UTC time\n\n for calendar_name, calendar_id in config.GOOGLE_CALENDARS.items():\n all_events = []\n events_result = service.events().list(calendarId=calendar_id, timeMin=now,\n maxResults=10, singleEvents=True, orderBy='startTime').execute()\n events = events_result.get('items', [])\n if not events:\n all_events.append(['Ei tulevia tapahtumia'])\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))[:10]\n all_events.append([start, event[\"summary\"], event[\"htmlLink\"]])\n all_calendar_events[calendar_name] = all_events\n\n return all_calendar_events", "def calendars(self):\n if \"calendars\" in self._prop_dict:\n return CalendarsCollectionPage(self._prop_dict[\"calendars\"])\n else:\n return None", "def calendars(self):\n return self.properties.get('calendars',\n EntityCollection(self.context, Calendar,\n ResourcePath(\"calendars\", self.resource_path)))", "def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n plusService = discovery.build('plus', 'v1', http=http_auth)\n app.logger.debug(\"Returning service\")\n return [service, plusService]", "def get_gcal_events(service, from_time):\n\n # The list() method returns a dict containing various metadata along with the actual calendar entries (if any). 
\n # It is not guaranteed to return all available events in a single call, and so may need called multiple times\n # until it indicates no more events are available, signalled by the absence of \"nextPageToken\" in the result dict\n\n logger.debug('Retrieving Google Calendar events')\n\n # make an initial call, if this returns all events we don't need to do anything else,,,\n eventsResult = service.events().list(calendarId=CALENDAR_ID, \n timeMin=from_time, \n singleEvents=True, \n orderBy='startTime', \n showDeleted=True).execute()\n\n events = eventsResult.get('items', [])\n # if nextPageToken is NOT in the dict, this should be everything\n if 'nextPageToken' not in eventsResult:\n logger.info('> Found {:d} upcoming events in Google Calendar (single page)'.format(len(events)))\n return events\n\n # otherwise keep calling the method, passing back the nextPageToken each time\n while 'nextPageToken' in eventsResult:\n token = eventsResult['nextPageToken']\n eventsResult = service.events().list(calendarId=CALENDAR_ID, \n timeMin=from_time, \n pageToken=token, \n singleEvents=True, \n orderBy='startTime', \n showDeleted=True).execute()\n newevents = eventsResult.get('items', [])\n events.extend(newevents)\n logger.debug('> Found {:d} events on new page, {:d} total'.format(len(newevents), len(events)))\n \n logger.info('> Found {:d} upcoming events in Google Calendar (multi page)'.format(len(events)))\n return events", "def get_cal_events(user, calservice):\r\n cal_page_token = None\r\n while True:\r\n try:\r\n #the next for loop retrives the calendar events\r\n #list to be checked for matching criteria\r\n prieml = user['primaryEmail']\r\n creator_to_del = '[email protected]'\r\n event_to_del = 'Digital Directorate Team Meeting'\r\n events = calservice.events().list(calendarId=prieml,\r\n pageToken=cal_page_token).execute()\r\n for event in events['items']:\r\n if event['status'] != 'cancelled':\r\n try:\r\n #this is the criteri to be checked against\r\n organiser = event['organizer']['email']\r\n summary = event['summary']\r\n if organiser == creator_to_del \\\r\n and summary == event_to_del:\r\n try:\r\n #checking for specific start date \r\n #in the event some events have different\r\n #dateTime\\date keywords\r\n if event['start']['dateTime']:\r\n evdate = event['start']['dateTime']\r\n startDate = datetime.strptime(evdate[0:10],\r\n '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate[0:10]))\r\n except KeyError:\r\n #if the keyword is not dateTime \r\n #then fetch date keyword\r\n evdate = event['start']['date']\r\n startDate = datetime.strptime(evdate, '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate))\r\n except KeyError:\r\n continue\r\n cal_page_token = events.get('nextPageToken')\r\n if not cal_page_token:\r\n break\r\n except ValueError:\r\n print('Oops! Thhe last event has an error. 
Try again...')", "def getAllCampaigns(service):\n # Using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'Status', 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetStatus', 'BudgetName', 'Amount',\n 'BudgetReferenceCount', 'IsBudgetExplicitlyShared')\n .Limit(0, pageSize)\n .Build())\n campaigns = []\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n campaigns.append(campaign)\n else:\n pass\n return campaigns", "def calendar_lists(self):\r\n return CalendarLists(self)", "def choose_calendar(self):\n page_token = None\n self.calendar_list = self.service.calendarList().list(pageToken=page_token).execute()\n for calendar_list_entry in self.calendar_list['items']:\n if similar(calendar_list_entry['summary'], self.args[\"calendar_name\"]) > 0.8:\n self.chosen_calendar = calendar_list_entry['id']\n return\n raise CalendarNotFoundException(\"No calendar with the provided name was found\")", "def calendar(self, calendar_id):\r\n return c.Calendar(self, calendar_id)", "def calendar(self, calendar_id):\r\n return c.Calendar(self, calendar_id)", "def readGoogleCal(self):\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n # Call the Calendar API\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n events_result = service.events().list(calendarId='primary', timeMin=now,\r\n maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n\r\n dateVar, timeVar = start.split('T')\r\n eventVar = event['summary']\r\n\r\n self.calDate.append(dateVar)\r\n self.calTime.append(timeVar)\r\n self.calEvent.append(eventVar)\r\n #print(calDate[count]+' ' + calTime[count] + ' ' +calEvent[count])\r", "def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n app.logger.debug(\"Returning service\")\n return service", "def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n app.logger.debug(\"Returning service\")\n return service", "def _get_service_list(self, service_name):\n service_list = self.service_dict[service_name]\n\n return service_list", "def list_services(service='http://arcgis.inei.gob.pe:6080/arcgis/rest/services'):\n all_services = []\n r = _post(service)\n for s in r['services']:\n 
all_services.append('/'.join([service, s['name'], s['type']]))\n for s in r['folders']:\n new = '/'.join([service, s])\n endpt = _post(new)\n for serv in endpt['services']:\n all_services.append('/'.join([service, serv['name'], serv['type']]))\n return all_services", "def main():\r\n creds = None\r\n # The file token.json stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('cal_token.json'):\r\n creds = Credentials.from_authorized_user_file('cal_token.json', SCOPES)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'client_secret.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('cal_token.json', 'w') as token:\r\n token.write(creds.to_json())\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n return service", "def get_calendar_events(calendar_url, params=None):\n return cache_calendar_events(calendar_url, params=params)\n # return CALENDAR_CACHED or cache_calendar(calendar_url)", "def selectable_services():\n\n db = current.db\n s3db = current.s3db\n\n stable = s3db.org_service\n query = (stable.deleted == False)\n rows = db(query).select(stable.id,\n stable.name,\n )\n services = {row.id: row.name for row in rows}\n return services", "def get_events_results(calendar_id=None):\n\n service = get_service()\n\n start, end = get_time_constraints()\n if calendar_id != None:\n events_results = service.events().list(calendarId=calendar_id,\n timeMin=start, \n timeMax=end, \n singleEvents=True,\n orderBy=\"startTime\").execute()\n else:\n events_results = service.events().list(calendarId='primary',\n timeMin=start, \n timeMax=end, \n singleEvents=True,\n orderBy=\"startTime\").execute()\n \n return events_results", "def __addCalendars(self, tree, key=\"dates/calendars\"):\n \n calendars = self.__getStore(self.__data, \"calendar\")\n \n for element in tree.findall(\"./%s/*\" % key):\n if not element.get(\"draft\"):\n self.__addCalendar(calendars, element)", "def get_service():\n \n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n token_path = f\"{sys.path[0]}/creds/token.pickle\"\n if os.path.exists(token_path):\n with open(token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n cred_path = f\"{sys.path[0]}/creds/credentials.json\"\n flow = InstalledAppFlow.from_client_secrets_file(\n cred_path, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n return service", "def authenticate_google():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) 
credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n return service", "def calendar_events(self):\r\n return calendars.CalendarEvents(self)", "def available_calendars(self):\n\n calendars = [filename for filename in listdir(self.calendar_directory)\n if isfile(join(self.calendar_directory, filename))]\n\n # Filter the calendar.judaic.2018 files and so on,\n # because we use the symbolic link calendar.judaic\n calendars = [calendar for calendar in calendars\n if not calendar.startswith(\"calendar.judaic.\")]\n return sorted([calendar[9:] for calendar in calendars])", "def list_services(\n self,\n orderby=None, # type: Optional[List[Union[str, \"models.Enum77\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum78\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum79\"]]]\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"models.CollectionOfPrintService\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfPrintService\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_services.metadata['url'] # type: ignore\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfPrintService', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def 
get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def _get_pay_calendars(self, leg):\n pay_calendars = [\n leg.PayCalendar(),\n leg.Pay2Calendar(),\n leg.Pay3Calendar(),\n leg.Pay4Calendar(),\n leg.Pay5Calendar()\n ]\n return [cal for cal in pay_calendars if cal is not None]", "def apt_list(cal, c_id, start, end):\n\n # Get the appointments returning it as list of dictionaries\n appointments_result = cal.events().list(\n calendarId=c_id,\n timeMin=start,\n timeMax=end,\n singleEvents=True,\n orderBy='startTime'\n ).execute()\n appointments = appointments_result.get('items', [])\n return appointments", "def get_all(self, marker=None, limit=None,\n sort_key='name', sort_dir='asc'):\n\n services = self._get_services(marker,\n limit,\n sort_key,\n sort_dir)\n\n return ServicesCollection.convert_with_links(services, limit,\n sort_key=sort_key,\n sort_dir=sort_dir)", "def getCalendar(self):\n cal = BlankCalendar()\n for datable in self.run_query():\n cal.add_component(datable.getEvent())\n \n return cal", "def get_calendar_events(calendar_id, max_events, time_min):\n time_max = get_current_time(time_max=True)\n print(\"Fetching calendar events from {0} to {1}\".format(time_min, time_max))\n\n headers = {\n \"Authorization\": \"Bearer \" + google_auth.access_token,\n \"Accept\": \"application/json\",\n \"Content-Length\": \"0\",\n }\n url = (\n \"https://www.googleapis.com/calendar/v3/calendars/{0}\"\n \"/events?maxResults={1}&timeMin={2}&timeMax={3}&orderBy=startTime\"\n \"&singleEvents=true\".format(calendar_id, max_events, time_min, time_max)\n )\n resp = magtag.network.requests.get(url, headers=headers)\n resp_json = resp.json()\n if \"error\" in resp_json:\n raise RuntimeError(\"Error:\", resp_json)\n resp.close()\n # parse the 'items' array so we can iterate over it easier\n items = []\n resp_items = resp_json[\"items\"]\n if not resp_items:\n print(\"No events scheduled for today!\")\n for event in range(0, len(resp_items)):\n items.append(resp_items[event])\n return items", "def current_events(service, calander_id):\n event = service.events().get(calendarId='[email protected]', eventId=calander_id).execute()\n return event", "def buildAPICal(self, credentials):\n from googleapiclient.discovery import build\n return build('calendar', 'v3', credentials=self.creds)", "def get_services(**options):\n\n return {}", "def get_services(**options):\r\n return {}", "def flatten_events(events_calendar_dic, sort=False):\n\n events = []\n for calendar, events_list in events_calendar_dic.items():\n for event in events_list:\n event['calendar'] = calendar\n events += events_list\n\n if sort: return sorted(events, key = lambda x: parse(x['start']['dateTime']))\n return events", "def get_schedules():\n return json.dumps(calendar.get_schedules())", "def calendar_groups(self):\n if \"calendarGroups\" in self._prop_dict:\n return CalendarGroupsCollectionPage(self._prop_dict[\"calendarGroups\"])\n else:\n return None", "def calendar(self, name=None, cal_id=None):\n if name and not cal_id:\n for calendar in 
self.calendars():\n display_name = calendar.get_display_name()\n if display_name == name:\n return calendar\n if name and not cal_id:\n raise error.NotFoundError(\n \"No calendar with name %s found under %s\" % (name, self.url)\n )\n if not cal_id and not name:\n return self.calendars()[0]\n\n if str(URL.objectify(cal_id).canonical()).startswith(\n str(self.client.url.canonical())\n ):\n url = self.client.url.join(cal_id)\n elif (\n isinstance(cal_id, URL)\n or cal_id.startswith(\"https://\")\n or cal_id.startswith(\"http://\")\n ):\n url = self.url.join(cal_id)\n else:\n url = self.url.join(quote(cal_id) + \"/\")\n\n return Calendar(self.client, name=name, parent=self, url=url, id=cal_id)", "def get_all_clients_for_service(\n self, job_config: \"MarathonServiceConfig\"\n ) -> Sequence[MarathonClient]:\n all_clients = [self.get_current_client_for_service(job_config)]\n all_clients.extend(self.get_previous_clients_for_service(job_config))\n\n return dedupe_clients(all_clients)", "def list_services(self):\n service_types = list(self.services.keys())\n service_types.sort()\n\n services = {}\n for s_type in service_types:\n if s_type not in services:\n services[s_type] = []\n names = list(self.services[s_type].keys())\n names.sort()\n for name in names:\n services[s_type].append(name)\n return services", "def calendar(self, name=None, cal_id=None, cal_url=None):\n if not cal_url:\n return self.calendar_home_set.calendar(name, cal_id)\n else:\n return Calendar(self.client, url=self.client.url.join(cal_url))", "def handle_get_calendar(user_id, id):\n if not(get_user_global_preferences(user_id)):\n return not_found(jsonify(dict(error='User not found')))\n\n calendar = get_calendar(user_id, id)\n\n if calendar:\n\n response = dict(\n name=calendar.name,\n description=calendar.description,\n base=calendar.base,\n color=calendar.color,\n active=calendar.active,\n carbon=calendar.carbon,\n preferences=calendar.preferences\n )\n\n return ok(jsonify(response))\n\n return not_found(jsonify(dict(error='Calendar not found')))", "def _PrintUserCalendars(self):\n\n feed = self.cal_client.GetAllCalendarsFeed()\n print 'Printing allcalendars: %s' % feed.title.text\n for i, a_calendar in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. 
%s' % (i, a_calendar.title.text,)", "def get_date_list(self, queryset, date_type):\n date_field = self.get_date_field()\n dates_group = [list(qs.dates(date_field, date_type)) for qs in queryset]\n dates = [d.day for d in reduce(lambda a, b: a + b, dates_group, [])]\n\n calendar.setfirstweekday(6) # starts at sunday\n month = self.get_month()\n year = self.get_year()\n cal = calendar.monthcalendar(int(year), int(month))\n\n for i, week in enumerate(cal):\n for j, day in enumerate(week):\n state = models.DATE_STATES[dates.count(day)]\n cal[i][j] = {'day': day, 'state': state}\n return cal", "def get_calendar(self, month: int = None, year: int = None) -> List[Hours]:\n url = \"/v1/markets/calendar\"\n params = {\"month\": month, \"year\": year}\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.calendar.days.day", "def get_services(self): \n if self._access_token is None:\n raise RequiresAccessTokenError()\n\n response = self.__make_oauth_request(ADD_URLS_FOR_SERVICES_URL, token=self._access_token, signed=True)\n return simplejson.loads(response.read()).keys()", "def pull_calendar_events(calendarId, timeMax=datetime.datetime.now().isoformat('T')):\n\n # NOTE: Pulling events since last update time will be implemented later!\n\n\n eventList = [] # List of events returned by API.\n nextPageToken = \"\"\n while True:\n print(\"------NEW REQUEST------\")\n\n response = service.events().list(calendarId=calendarId, orderBy=\"updated\", pageToken = nextPageToken, timeMax=timeString,fields = fieldString).execute()\n if 'items' in response:\n eventList += response['items'] # if the response has events, add them\n\n if \"nextPageToken\" not in response:\n print(\"DONE\")\n break \n # If there's no nextPageToken in response body, we break out and return what we have\n else:\n print(\"NEXT PAGE TOKEN: \" + response['nextPageToken'])\n nextPageToken = response['nextPageToken'] # Otherwise, make another request for next page\n\n return eventList", "def cal(userEvent):\n\n # def cal(userEvent):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n\n\n# ------------- Calling the calendar --------------\n\n # print('Printing events from', cal_name)\n\n events_result = service.events().list(calendarId=calid, timeMin=now,\n maxResults=100, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n\n# ---------------- grabing old events ALL EVENTS ----------------------\n# prior to deletion if parsing is succefull\n old_list = []\n\n for old_event in events:\n # print(calendarId)\n # service.events().delete(calendarId=cal_id,\n # eventId=event['id']).execute()\n\n old_event = service.events().get(\n calendarId=cal_id, 
eventId=old_event['id']).execute()\n\n old_list.append(old_event['id'])\n\n # print(old_list)\n\n\n# ---------------- Creating EVENTS ----------------------\n\n # event est une variable puisée dans json file (deprecated INFO)\n # event = event_json (deprecated INFO)\n\n for k in range(len(userEvent)):\n\n try:\n\n userEvent[k] = service.events().insert(\n calendarId=cal_id, body=userEvent[k]).execute()\n\n deleteGO = True\n\n except HttpError as err:\n\n # print(sys.exc_info()[1])\n\n if err.resp.status in [400, 404]:\n\n if err.resp.get('content-type', '').startswith('application/json'):\n reason = json.loads(err.content).get(\n 'error').get('errors')[0].get('message')\n\n print('\\n', \"Veuillez remplir tous les champs\")\n print(\"L'événement comportant l'erreur a été ignoré\")\n exit()\n # print(reason)\n\n # service.events().delete(calendarId=cal_id,\n # eventId=userEvent[k]).execute()\n\n deleteGO = False\n\n\n# ---------------- Printing EVENTS ----------------------\n# TB Shoot les events apparaissent avec un délai\n # print(events)\n # if not events:\n # print('No upcoming events found.')\n # for event in events:\n # start = event['start'].get('dateTime', event['start'].get('date'))\n # print(start, event['summary'])\n\n# ---------------- deleting old EVENTS ----------------------\n\n if deleteGO:\n\n for event2go in old_list:\n\n service.events().delete(calendarId=cal_id,\n eventId=event2go).execute()", "def get_services(self):\r\n return get_service_list()", "def calendar(self):\n if \"calendar\" in self._prop_dict:\n if isinstance(self._prop_dict[\"calendar\"], OneDriveObjectBase):\n return self._prop_dict[\"calendar\"]\n else :\n self._prop_dict[\"calendar\"] = Calendar(self._prop_dict[\"calendar\"])\n return self._prop_dict[\"calendar\"]\n\n return None", "def calendar(self):\n if \"calendar\" in self._prop_dict:\n if isinstance(self._prop_dict[\"calendar\"], OneDriveObjectBase):\n return self._prop_dict[\"calendar\"]\n else :\n self._prop_dict[\"calendar\"] = Calendar(self._prop_dict[\"calendar\"])\n return self._prop_dict[\"calendar\"]\n\n return None", "def getServiceDefinitions(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n json_response = get_services_json(strCSPProdURL, ORG_ID, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n\n services= json_response['servicesList']\n table = PrettyTable(['Service Name', 'Access type', 'Service URL'])\n for i in services:\n table.add_row([i['displayName'], i['serviceAccessType'], i['serviceUrls']['serviceHome']])\n print(table)", "async def test_calendars_http_api(hass, hass_client):\n await async_setup_component(hass, \"calendar\", {\"calendar\": {\"platform\": \"demo\"}})\n await hass.async_block_till_done()\n client = await hass_client()\n response = await client.get(\"/api/calendars\")\n assert response.status == 200\n data = await response.json()\n assert data == [\n {\"entity_id\": \"calendar.calendar_1\", \"name\": \"Calendar 1\"},\n {\"entity_id\": \"calendar.calendar_2\", \"name\": \"Calendar 2\"},\n ]", "def calendar_events(self):\r\n return CalendarEvents(self)", "def get_calendar(self, calendar_id=None, calendar_name=None):\n if calendar_id and calendar_name:\n raise RuntimeError('Provide only one of the options')\n\n if not calendar_id and not calendar_name:\n raise RuntimeError('Provide one of the options')\n\n if calendar_id:\n # get calendar by it's id\n url = self.build_url(\n 
self._endpoints.get('get_calendar').format(id=calendar_id))\n params = None\n else:\n # get calendar by name\n url = self.build_url(self._endpoints.get('root_calendars'))\n params = {\n '$filter': \"{} eq '{}'\".format(self._cc('name'), calendar_name),\n '$top': 1}\n\n response = self.con.get(url, params=params)\n if not response:\n return None\n\n if calendar_id:\n data = response.json()\n else:\n data = response.json().get('value')\n data = data[0] if data else None\n if data is None:\n return None\n\n # Everything received from cloud must be passed as self._cloud_data_key\n return self.calendar_constructor(parent=self,\n **{self._cloud_data_key: data})", "async def api_get_services(g: WalletTypeInfo = Depends(get_key_type)):\n user = await get_user(g.wallet.user)\n wallet_ids = user.wallet_ids if user else []\n services = []\n for wallet_id in wallet_ids:\n new_services = await get_services(wallet_id)\n services += new_services if new_services else []\n return [service.dict() for service in services] if services else []", "def sheets_service() -> object:\n g_sheets_service = build('sheets', 'v4', credentials=google_creds())\n\n return g_sheets_service", "def configure_gcal_main():\n\n # Change Google service account credentials back to main service account\n GoogleCalendarApi.credentials_dir = TestUtils.GCAL_MAIN_CREDENTIALS_DIR\n\n # Change the calendar gcal_api.py accesses back to main calendar\n GoogleCalendarApi.calendar_id = TestUtils.GCAL_MAIN_CALENDAR", "def get_services(self):\n collection_list = []\n try:\n services = self.client.discover_services()\n if services:\n for service in services:\n if 'collection' in service.type.lower():\n for eachone in self.get_collection(service.address):\n collection_list.append({'name': eachone.name})\n break\n except Exception as e:\n demisto.error(\"Failed to fetch collections, exception:{}\".format(e))\n raise e\n\n return collection_list", "def refresh(self):\n selected = []\n if not self.__new_service:\n selected = [str(t.text()) for t in\n self.__service_list.selectedItems()]\n\n self.__service_list.clear()\n if not self.__show:\n self.__services = opencue.api.getDefaultServices()\n else:\n self.__services = self.__show.getServiceOverrides()\n\n for service in self.__services:\n item = QtWidgets.QListWidgetItem(service.name())\n self.__service_list.addItem(item)\n\n if service.name() in selected:\n item.setSelected(True)\n\n self.__service_list.sortItems()", "def get_effective_services(self):\n myname = self['hostgroup_name']\n if not myname: return []\n \n result = []\n for service in Service.objects.all:\n hostgroup_name = service['hostgroup_name'] or \"\"\n hostgroups = service['hostgroups'] or \"\"\n if myname in hostgroups.split(','):\n result.append( service )\n elif myname in hostgroup_name.split(\",\"):\n result.append( service )\n return result", "def get_services(self):\n\t\t#Entrega el dict sin miramientos\n\t\treturn self._services", "def getServiceIdsForDate(self, dt, exclude0000=True):\n\n dayDict = {1:\"monday\", 2:\"tuesday\", 3:\"wednesday\", 4:\"thursday\",\n 5:\"friday\", 6:\"saturday\", 7:\"sunday\"}\n sqlQuery = \"select service_id from calendar\" \\\n + \" where {} = 1\".format(dayDict[dt.isoweekday()]) \\\n + \" and start_date <= ? 
and end_date >= ?;\"\n\n dtstr = dt.date().isoformat().replace('-','')\n queryTuple = (dtstr, dtstr)\n cursor = self.conn.execute(sqlQuery, queryTuple)\n\n serviceIdList = []\n for row in cursor:\n serviceIdList.append(row[0])\n\n # Get rid of weird serviceId\n if exclude0000:\n serviceIdList = [ii for ii in serviceIdList if ii != '0000']\n\n return serviceIdList", "def getSDDCService(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n if kwargs['objectname'] is not None:\n service_id = kwargs['objectname']\n response = get_sddc_single_service_json(proxy,sessiontoken, service_id)\n if response is not None:\n status = response.status_code\n if status == 200:\n json_response = response.json()\n service_entries = json_response['service_entries']\n table = PrettyTable(['ID', 'Name', 'Protocol', 'Source Ports', 'Destination Ports'])\n for i in service_entries:\n table.add_row([i['id'], i['display_name'], i['l4_protocol'], i['source_ports'], i['destination_ports']])\n print(table)\n else:\n print(\"No service found by that name.\")\n sys.exit(1)\n else:\n response = get_sddc_services_json(proxy,sessiontoken)\n if response is not None:\n status = response.status_code\n if status == 200:\n json_response = response.json()\n sddc_services = json_response['results']\n table = PrettyTable(['ID', 'Name','System Owned'])\n for i in sddc_services:\n table.add_row([i['id'], i['display_name'], i['_system_owned']])\n print(table)\n else:\n print(\"Plese check your syntax and try again.\")\n sys.exit(1)", "def _get_services(self, services):\n\n services_info = []\n\n for service in services[1]:\n services_info.append(self._make_dict(service))\n \n return services_info", "def list_services(self):\n response = self._get()\n\n services = []\n for s in response[\"services\"]:\n services.append(_create_service_from_json(s, self._session, self._url_base, s[\"folderName\"]))\n\n return services", "def list_services(ctx):\n pass", "def test_get_calendar(self):\n url, parsed = self.prepare_urls(\n 'v1:activity-calendar', subdomain=self.company.subdomain)\n \n response = self.client.post(url, {'dt': timezone.now()}, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, {'dt': timezone.now()}, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n content = json.loads(response.content)\n self.assertTrue(content.has_key('calendar_data'))", "def get_services_list(self, services):\n if not services:\n return []\n\n return [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]", "def refresh_calendar():\n manage.refresh_calendar()", "def get_outlook_calendar_entries(days = 1):\r\n outlook = win32.Dispatch('outlook.application')\r\n\r\n ns = outlook.GetNamespace(\"MAPI\")\r\n appointments = ns.GetDefaultFolder(9).Items\r\n appointments.Sort(\"[Start]\")\r\n appointments.IncludeRecurrences = \"True\"\r\n\r\n date_from = datetime.datetime.today()\r\n begin = date_from.date().strftime(\"%x\")\r\n\r\n date_to = datetime.timedelta(days=(days+1)) + date_from\r\n end = date_to.date().strftime(\"%x\")\r\n\r\n date_filter = \"[Start] >= '\" + begin + \"' AND [END] <= '\" + end + \"'\"\r\n\r\n print(date_filter)\r\n\r\n appointments = appointments.Restrict(date_filter)\r\n events_list = []\r\n\r\n for a in appointments:\r\n #print(\"from appointment \" + str(a.Start))\r\n event_date = 
a.Start.replace(tzinfo=timezone(datetime.timedelta(seconds=time.localtime().tm_gmtoff)))\r\n events_list.append([event_date, a.Subject, a.Duration, a.Location])\r\n\r\n return events_list", "def get_services(self):\n\n return list(self.services.values())", "def get_ports(svc_group, db):\n results = []\n for svc in svc_group:\n port = db.GetService(svc)\n results.append((svc, port))\n return results", "def services_by_name(self,servicename):\n\t\tres = []\n\t\tfor k,v in self.services.items():\n\t\t\tif k[1].lower() == servicename.lower():\n\t\t\t\tres += [self.services[k]]\n\t\treturn res", "def get_google_service_account(cls, handler):\n\n if roles.Roles.is_super_admin():\n # ?tab= for v1.9, ?action= for v1.8\n exit_url = '%s?tab=google_service_account' % handler.LINK_URL\n else:\n exit_url = cls.request.referer\n rest_url = GoogleServiceAccountRESTHandler.URI\n\n template_values = {}\n template_values['page_title'] = handler.format_title(\n 'Google Service Accounts')\n\n content = safe_dom.NodeList()\n edit_google_service_account_action = (\n base.GoogleServiceAccountBase.\n DASHBOARD_EDIT_SERVICE_ACCOUNT_ACTION)\n\n for name, key in (service_account_models.GoogleServiceAccountTypes.\n to_dict().iteritems()):\n content.append(\n safe_dom.Element(\n 'a', id=edit_google_service_account_action,\n className='gcb-button gcb-pull-right', role='button',\n style='margin: 5px',\n href='%s?action=%s&key=%s&credential_type=%s' % (\n handler.LINK_URL, edit_google_service_account_action,\n key, key)\n ).add_text('Add/Edit %s object' % name)\n )\n\n # Title - Default Settings\n content.append(\n safe_dom.Element('h3').add_text('Default Settings')\n )\n\n # Table - Default Settings\n table_div = safe_dom.Element(\n 'div', style='width: 100%; overflow: scroll; margin-top: 10px;')\n table = safe_dom.Element('table')\n table_div.add_child(table)\n content.append(table_div)\n\n table_heading = safe_dom.Element('tr')\n for attr in cls.TABLE_HEADING_LIST:\n table_heading.add_child(\n safe_dom.Element('th').add_text(attr))\n\n # table_heading.add_child(\n # safe_dom.Element('th').add_text('Edit Link'))\n\n table.add_child(table_heading)\n\n all_settings = (\n google_service_account.GoogleServiceManager.\n get_all_default_settings())\n\n # TODO(rthakker) Add support for namespaces from course list etc\n # later on\n for entity in all_settings:\n tr = safe_dom.Element('tr')\n table.add_child(tr)\n args = {\n 'action': edit_google_service_account_action,\n 'key': entity.id,\n }\n\n for attr in cls.TABLE_HEADING_LIST:\n tr.add_child(safe_dom.Element('td').add_text(\n getattr(entity, attr)\n ))\n\n # href = '%s?%s' % (handler.LINK_URL, urllib.urlencode(args))\n # link = safe_dom.Element(\n # 'a', href=href, type='button', className='gcb-button'\n # ).add_text('Edit')\n # edit_td = safe_dom.Element('td')\n # edit_td.add_child(link)\n # tr.add_child(edit_td)\n\n\n content.append(\n safe_dom.Element('p').add_text('Total: %d' % len(all_settings))\n )\n template_values['main_content'] = content\n handler.render_page(template_values)", "def get_holidays(year, url, service_key):\n payload = {'solYear': str(year),\n 'numOfRows': '50',\n '_type': 'json',\n 'ServiceKey': service_key}\n\n payload_str = urllib.parse.urlencode(payload, safe=\"%\") # service key contains \"%\"\n\n response = requests.get(url, params=payload_str)\n if response.status_code == 200:\n holidays = [item['locdate'] for item in response.json()['response']['body']['items']['item']]\n holidays = list(map(conv_int_to_date, holidays))\n return holidays", 
"def collect(self):\n service = build_service('calendar', 'v3')\n data = []\n\n for time_window, suffix in zip(self._parameters['time_windows'],\n self._parameters['suffixes']):\n # TODO: Get Search prefix and calendar from configs\n events = get_calendar_entries_by_query(service, \"S-\", time_window, \"Tracking\")\n\n # If we don't have any events we set it to the window size (which is in hours)\n seconds_since = [time_window * 60 * 60]\n long_count = 0\n short_count = 0\n for event in events:\n start_time = parse(event['start'].get('dateTime'))\n end_time = parse(event['end'].get('dateTime'))\n seconds_since.append(\n (datetime.datetime.now(datetime.timezone.utc) - start_time).total_seconds())\n event_length = end_time - start_time\n if event_length.seconds <= 30*60:\n short_count += 1\n else:\n long_count += 1\n\n data.append(DataPoint(time.time(), self._base_name + 's_count.' + suffix, len(events)))\n data.append(DataPoint(time.time(), self._base_name + 's_seconds_since.' + suffix, min(seconds_since)))\n data.append(DataPoint(time.time(), self._base_name + 's_short_count.' + suffix, short_count))\n data.append(DataPoint(time.time(), self._base_name + 's_long_count.' + suffix, long_count))\n return data", "async def test_init_calendar(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n mock_calendars_list: ApiResult,\n test_api_calendar: dict[str, Any],\n mock_events_list: ApiResult,\n) -> None:\n\n mock_calendars_list({\"items\": [test_api_calendar]})\n mock_events_list({})\n assert await component_setup()\n\n state = hass.states.get(TEST_API_ENTITY)\n assert state\n assert state.name == TEST_API_ENTITY_NAME\n assert state.state == STATE_OFF\n\n # No yaml config loaded that overwrites the entity name\n assert not hass.states.get(TEST_YAML_ENTITY)", "def test_calendar_view_list(self):\n response = self.client.get('/module/calendar/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'frontend/appointment/calendar/list.html')\n\n request = self.factory.get('/module/calendar/')\n request.user = self.user\n request.session = {}\n response = calendar_list(request)\n self.assertEqual(response.status_code, 200)", "def configure_gcal_for_testing():\n\n # Change Google service account credentials for testing purposes\n GoogleCalendarApi.service_account_dir = \\\n join(dirname(realpath(__file__)),\n '../../conf/test_gcal_service_account.json')\n\n # Change the calendar gcal_api.py accesses for testing purposes.\n # 'primary' means the primary calendar of the Google service account\n # being used.\n GoogleCalendarApi.calendar_id = 'primary'", "def search_by_services(request: HttpRequest) -> QuerySet:\n services = ServiceDocument.search()\n\n query: str = request.GET.get(\"query\")\n if (query is not None) and (query != \"\"):\n services = services.query(\n MultiMatch(\n query=query,\n fields=[\n \"title\",\n \"description\",\n \"type.name\",\n \"type.risks\",\n \"validity.name\",\n \"company.name\",\n \"company.description\",\n \"company.phone\"\n ]\n )\n )\n\n filters: Dict[str, Optional[int]] = dict()\n for field in [\"type\", \"validity\", \"company\"]:\n parameter = request.GET.get(field)\n if parameter is not None:\n filters.update({f\"{field}.id\": int(parameter)})\n\n for field, term in filters.items():\n services = services.filter(\"term\", **{field: term})\n\n sort: Optional[str] = request.GET.get(\"sort\")\n services = services.sort(sort) if sort is not None else services.sort()\n\n return services[0:services.count()].to_queryset()", "def 
get_services(request):\n try:\n company = Company.objects.get(pk=int(request.POST['company_id']))\n\n services = []\n for service in company.services.all():\n services.append({'name': service.name, 'id': service.pk})\n\n return format_ajax_response(True, \"Company's services retrieved successfully.\", {'services': services})\n except Exception as ex:\n logging.error(\"failed to get_services: %s\" % ex)\n return format_ajax_response(False, \"There was a problem retrieving the company's services.\")", "def create_cal_events(list_of_events, strasse, hausnummer):\n list_of_ical_events = []\n\n for calendarEvent in list_of_events:\n event = Event()\n event = create_ical_event_from_calendar_event(event, calendarEvent, 8, 10)\n\n # Automatic encoding is not yet implemented for parameter values, so you must use the ‘v*’ types you can import from the icalendar package (they’re defined in icalendar.prop):\n event['location'] = vText('{} {}, Bremen'.format(strasse, hausnummer))\n\n # TODO uid exaclty according to specification https://www.kanzaki.com/docs/ical/uid.html\n event['uid'] = event['dtstart'].to_ical()\n event.add('priority', 5)\n list_of_ical_events.append(event)\n\n return list_of_ical_events", "def get_service(credentials):\n try:\n creds = service_account.Credentials.from_service_account_file(\n credentials, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=creds)\n drive_service = build('drive', 'v3', credentials=creds)\n return service, drive_service\n except Exception as e:\n print(f'Error accessing Google Drive with service account '\n f'{credentials}')\n raise(e)", "def get_serviceIDs_for_date(date):\n global SDHandler\n return SDHandler.effective_service_ids(date);", "def get_all():\n if not SERVICE_DIR:\n raise CommandExecutionError(\"Could not find service directory.\")\n # - List all daemontools services in\n return sorted(os.listdir(SERVICE_DIR))", "def calendar_for_event_description(ed):\n return icemac.ab.calendar.interfaces.ICalendar(ed.context)", "def calendar_options():\n user_id = current_identity.id\n\n if user_id:\n response = {\n \"status\": None,\n \"dateRange\" : []\n }\n \n possibleDateArr = query_month_year(user_id)\n\n if not possibleDateArr:\n return jsonify({\"status\" : \"error\"})\n\n response[\"dateRange\"] = format_dateRange(possibleDateArr)\n #Todo: Add dateArray infor to response\n response[\"status\"] = \"ok\"\n return jsonify(response)\n #TODO How handle if no user- id send to homepage but notices?" ]
[ "0.82477534", "0.6850524", "0.6471131", "0.63463163", "0.63463163", "0.63192886", "0.62622386", "0.6232313", "0.61395335", "0.61257684", "0.6111868", "0.61005104", "0.603974", "0.59506303", "0.5812229", "0.5798276", "0.56924987", "0.5668386", "0.5658008", "0.55682445", "0.5539597", "0.5539597", "0.55091655", "0.5496159", "0.5496159", "0.5475961", "0.5406943", "0.53625625", "0.53276706", "0.5286893", "0.52646077", "0.52432764", "0.5204965", "0.520432", "0.518836", "0.5186381", "0.51756155", "0.51633066", "0.51581806", "0.514598", "0.51385653", "0.51102144", "0.5110055", "0.51053274", "0.50935847", "0.5090862", "0.50868577", "0.50338537", "0.5015462", "0.50047535", "0.49888343", "0.49523243", "0.49424142", "0.49347705", "0.49018842", "0.48894575", "0.48880193", "0.48854604", "0.48851347", "0.487674", "0.48737815", "0.48445135", "0.48445135", "0.4828597", "0.48268136", "0.4816973", "0.4816749", "0.48128888", "0.4805013", "0.47946584", "0.47900206", "0.47827467", "0.47648358", "0.47584632", "0.4749816", "0.4742918", "0.47408932", "0.47261778", "0.4716557", "0.4687587", "0.4678008", "0.467636", "0.46582386", "0.4657069", "0.46492988", "0.46478248", "0.46396542", "0.46356478", "0.4632458", "0.46270812", "0.4612145", "0.46111572", "0.4606454", "0.46050692", "0.45941737", "0.45883828", "0.45861033", "0.45844215", "0.45816594", "0.45810533" ]
0.8079838
1
A helper method that generates a dictionary of arguments needed to instantiate a BaseBoto object. The purpose of this method is to abstract out the code to handle optional CLI arguments and not duplicate the None handling code.
def __get_arguments(args=None, logger=None, stats=None): if not args: parser = get_parser() add_boto_cli_arguments(parser) # Parse only the known arguments added by add_boto_cli_arguments(). # We only need those arguments to create Boto object, nothing else. # parse_known_args() return (Namespace, list of unknown arguments), # we only care about the Namespace object here. args = parser.parse_known_args()[0] if not logger: logger = get_logger(name=NAME) if not stats: stats = get_stats(prefix=NAME) return { 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()), 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()), 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()), 'region': getattr(args, 'boto_region', DEFAULT['region']()), 'logger': logger, 'stats': stats, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def GetArgs():\n \n UserArgs = {}\n UserArgs['help'] = False\n UserArgs['RsodFileName'] = \"\"\n UserArgs['BiosPathX64'] = \"\"\n\n for i in range(1,len(sys.argv)):\n if sys.argv[i].lower() == \"-help\" : UserArgs[\"help\"] = True\n elif sys.argv[i].lower() == \"-h\" : UserArgs[\"help\"] = True\n elif \"-rsodfile=\" in sys.argv[i].lower() : UserArgs['RsodFileName'] = sys.argv[i].split ('=', 1)[1]\n elif \"-biospathx64=\" in sys.argv[i].lower() : UserArgs['BiosPathX64'] = sys.argv[i].split ('=', 1)[1]\n\n return UserArgs", "def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value", "def _build_instance_common_args(self, ec2_keyname, availability_zone,\r\n keep_alive, hadoop_version):\r\n params = {\r\n 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),\r\n 'Instances.HadoopVersion' : hadoop_version\r\n }\r\n\r\n if ec2_keyname:\r\n params['Instances.Ec2KeyName'] = ec2_keyname\r\n if availability_zone:\r\n params['Instances.Placement.AvailabilityZone'] = availability_zone\r\n\r\n return params", "def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data", "def get_args_dict(class_, options: Options) -> dict:\n\n argspec = getfullargspec(class_.__init__)\n init_args = argspec.args\n init_args.pop(0) # self\n result = {k: v for k, v in options.items() if k in init_args}\n\n positional_args = init_args[:-len(argspec.defaults)]\n\n missing_args = [a for a in positional_args if a not in options]\n if missing_args:\n raise BadConfigError(\n f'Some required parameters are missing in \"{options[\"name\"]}\" config: ' +\n ', '.join(missing_args)\n )\n return result", "def generate_command_args_with_additional_fields(additional_fields):\n command_args: Dict[str, str] = {}\n actual_additional_fields: Dict[str, str] = {}\n for each_field in additional_fields:\n if each_field in DEFAULT_ARGS:\n command_args[each_field] = additional_fields[each_field]\n else:\n actual_additional_fields[each_field] = additional_fields[each_field]\n command_args[\"additional_fields\"] = remove_null_fields_and_convert_additional_fields_in_string(\n actual_additional_fields)\n return command_args", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def _arg_parse(self, **options) -> Dict[str, Any]:\n extra_options = dict()\n for key, value in options.items():\n private_key = f\"__{key}\"\n if hasattr(self, private_key):\n setattr(self, private_key, value)\n else:\n extra_options[key] = value\n\n return extra_options", "def _getArgs():\n parser = getCommonArgsParser(\n 'Generate OpenShift deployment YAML file'\n )\n\n addArgOverlayUuid(parser)\n addArgOutputFile(parser, None)\n\n return parser.parse_args()", "def add_extra_args(self):\n super(AwsCreateInstancesMethod, self).add_extra_args()\n self.parser.add_argument(\"--key_pair_name\", default=os.environ.get(\"YB_EC2_KEY_PAIR_NAME\"),\n help=\"AWS Key Pair name\")\n self.parser.add_argument(\"--security_group_id\", default=None,\n help=\"AWS comma delimited security group IDs.\")\n self.parser.add_argument(\"--volume_type\", choices=[\"gp3\", \"gp2\", \"io1\"], default=\"gp2\",\n help=\"Volume type 
for volumes on EBS-backed instances.\")\n self.parser.add_argument(\"--spot_price\", default=None,\n help=\"Spot price for each instance (if desired)\")\n self.parser.add_argument(\"--cmk_res_name\", help=\"CMK arn to enable encrypted EBS volumes.\")\n self.parser.add_argument(\"--iam_profile_arn\", help=\"ARN string for IAM instance profile\")\n self.parser.add_argument(\"--disk_iops\", type=int, default=1000,\n help=\"desired iops for aws v4 instance volumes\")\n self.parser.add_argument(\"--disk_throughput\", type=int, default=125,\n help=\"desired throughput for aws gp3 instance volumes\")", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def _arg2kw(self, mixed_args):\n def insert(dict_, k, v):\n if k in dict_:\n print \"duplicated args : %s \" % kv[0]\n raise ArgParseError\n dict_[k] = v\n \n opts = []\n args = {}\n\n n = len(mixed_args)\n i = 0\n while i < n:\n a = mixed_args[i]\n if a == '-' or a == '--' :\n opts.append(a)\n elif a.startswith(\"---\"):\n print \"invalid args: %s\" % mixed_args\n print \"only the following formats are supported:\"\n print \" arg1\"\n print \" --input=name1\"\n print \" --output name3\"\n print \" -oname2\"\n print \" -o name4\"\n raise ArgParseError\n elif a.startswith(\"--\"):\n kv = a[2:].split(\"=\", 1)\n if len(kv) == 2:\n insert(args, kv[0], kv[1])\n else:\n i += 1\n insert(args, kv[0], mixed_args[i])\n elif a.startswith(\"-\"):\n if len(a) > 2:\n insert(args, a[1], a[2:])\n else:\n i += 1\n insert(args, a[1], mixed_args[i])\n else:\n opts.append(a)\n i += 1\n \n return opts, args", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def get_defaults(self):\n default_dict = {}\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if defaults:\n default_dict = dict(zip(args[-len(defaults):], defaults))\n return default_dict", "def _get_args(self):\n parser = ArgumentParser(\n description=\"Dynamically generates Snakefiles for data \"\n \"integration and machine learning pipelines.\"\n )\n\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=(\n \"Configuration filepath. 
(Will look for file named config.yml \"\n \"in current working directory, if none specified.)\"\n ),\n )\n\n parser.add_argument(\n \"-r\",\n \"--run\",\n default=False,\n help=(\n \"Runs pipeline, in addition to generating Snakefile.\"\n ),\n )\n\n # convert command-line args to a dict and return\n args = parser.parse_args()\n\n args = dict(\n (k, v) for k, v in list(vars(args).items()) if v is not None\n )\n\n return args", "def getOptions( argv ):\n opts = {} # Empty dictionary to store key-value pairs.\n while argv: # While there are arguments left to parse...\n if argv[0][0] == '-': # Found a \"-name value\" pair.\n opts[argv[0][1:]] = argv[1] # Add key and value to the dictionary.\n argv = argv[1:] # Reduce the argument list by copying it starting from index 1.\n if 'seed' in opts:\n cons.random_seed = int( opts['seed'] )\n if 'N' in opts:\n cons.N = int( opts['N'] )\n if 'MCMC' in opts:\n bcons.B = int( opts['MCMC'] )\n if 'minPop' in opts:\n bcons.min_pop_size = int( opts['minPop'] )\n if 'maxParents' in opts:\n bcons.max_parents = int( opts['maxParents'] )\n if 'localPopSize' in opts:\n bcons.A = int( opts['localPopSize'] )\n if 'binaryBOA' in opts:\n bcons.binary_coding = True if opts['binaryBOA'].lower() == 'true' else False\n if 'useBOA' in opts:\n # True by default\n if opts['useBOA'].lower() == 'true':\n cons.is_boa = True\n else:\n cons.is_boa = False\n return opts", "def args_map_custom(cls) -> dict:\n args = {}\n args.update(cls.args_map_export())\n args.update({\"json_flat\": False})\n return args", "def _template_kwargs(*, logical_name: str, bucket: str, key: str) -> Dict[str, str]:\n if logical_name == \"ArtifactBuilder\":\n return dict(ArtifactBucketName=bucket, WorkersS3Key=key)\n elif logical_name == \"LayerBuilder\":\n return dict(ReplicationBucket=bucket, WorkersS3Key=key)\n else:\n raise ValueError(f\"Unknown logical name: {logical_name}\")", "def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params", "def oic_pre_construct(self, cli_info, request_args=None, **kwargs):\n for prop in self.msg_type.c_param.keys():\n if prop in request_args:\n continue\n try:\n request_args[prop] = cli_info.behaviour[prop]\n except KeyError:\n pass\n\n if \"post_logout_redirect_uris\" not in request_args:\n try:\n request_args[\n \"post_logout_redirect_uris\"] = \\\n cli_info.post_logout_redirect_uris\n except AttributeError:\n pass\n\n if \"redirect_uris\" not in request_args:\n try:\n request_args[\"redirect_uris\"] = cli_info.redirect_uris\n except AttributeError:\n raise MissingRequiredAttribute(\"redirect_uris\", request_args)\n\n try:\n if cli_info.provider_info[\n 'require_request_uri_registration'] is True:\n request_args['request_uris'] = cli_info.generate_request_uris(\n cli_info.requests_dir)\n except KeyError:\n pass\n\n return request_args, {}", "def _parse_create_args(self, args):\r\n data = {\r\n \"hourly\": 
args['--hourly'],\r\n \"cpus\": args['--cpu'],\r\n \"domain\": args['--domain'],\r\n \"hostname\": args['--hostname'],\r\n \"private\": args['--private'],\r\n \"dedicated\": args['--dedicated'],\r\n \"disks\": args['--disk'],\r\n \"local_disk\": not args['--san'],\r\n }\r\n\r\n try:\r\n memory = int(args['--memory'])\r\n if memory < 1024:\r\n memory = memory * 1024\r\n except ValueError:\r\n unit = args['--memory'][-1]\r\n memory = int(args['--memory'][0:-1])\r\n if unit in ['G', 'g']:\r\n memory = memory * 1024\r\n if unit in ['T', 'r']:\r\n memory = memory * 1024 * 1024\r\n\r\n data[\"memory\"] = memory\r\n\r\n if args['--monthly']:\r\n data['hourly'] = False\r\n\r\n if args.get('--os'):\r\n data['os_code'] = args['--os']\r\n\r\n if args.get('--image'):\r\n data['image_id'] = args['--image']\r\n\r\n if args.get('--datacenter'):\r\n data['datacenter'] = args['--datacenter']\r\n\r\n if args.get('--network'):\r\n data['nic_speed'] = args.get('--network')\r\n\r\n if args.get('--userdata'):\r\n data['userdata'] = args['--userdata']\r\n elif args.get('--userfile'):\r\n with open(args['--userfile'], 'r') as userfile:\r\n data['userdata'] = userfile.read()\r\n\r\n if args.get('--postinstall'):\r\n data['post_uri'] = args.get('--postinstall')\r\n\r\n # Get the SSH keys\r\n if args.get('--key'):\r\n keys = []\r\n for key in args.get('--key'):\r\n key_id = resolve_id(SshKeyManager(self.client).resolve_ids,\r\n key, 'SshKey')\r\n keys.append(key_id)\r\n data['ssh_keys'] = keys\r\n\r\n if args.get('--vlan_public'):\r\n data['public_vlan'] = args['--vlan_public']\r\n\r\n if args.get('--vlan_private'):\r\n data['private_vlan'] = args['--vlan_private']\r\n\r\n return data", "def _initiate_meta(kwargs, activity, ignores=()):\n meta = {AssociatedObjectId.ACTIVITY_ID: str(_retrieve_object_id(activity))}\n # also add the keys' in their snake case appearance so noPadding and no_padding, customHeight and custom_height\n keys_in_kwargs = KECARD_COMMON_KEYS + [snakecase(k) for k in KECARD_COMMON_KEYS]\n\n # initiate the meta based on known kwarg arguments\n for key in list(set(keys_in_kwargs)):\n if key in kwargs:\n meta[camelcase(key)] = kwargs.pop(key)\n\n # we check for custom_height specifically and deal with it.\n if snakecase(MetaWidget.CUSTOM_HEIGHT) in kwargs:\n meta[MetaWidget.CUSTOM_HEIGHT] = kwargs.pop(snakecase(MetaWidget.CUSTOM_HEIGHT))\n\n # remove the 'ignores' from the meta\n for key in ignores:\n if key in meta:\n del meta[key]\n\n return meta", "def check_args(args):\n map_args = {}\n\n if args['frequencies'] is None:\n return None\n\n if args['instance_type'] is None:\n return None\n\n if args['name'] is None:\n return None\n\n instance_details = AWS_INSTANCES.get(args['instance_type'])\n if instance_details is None:\n LOGGER.error('The instance type {0} is not supported.'.format(args['instance_type']))\n return None\n else:\n LOGGER.info(\n 'instance: {0}, vCPU: {1}, RAM: {2}GB, Disks: {3}x{4}GB, IOPS: {5}'.format(\n args['instance_type'],\n instance_details.vCPU,\n instance_details.memory,\n instance_details.number_disks,\n instance_details.size,\n instance_details.iops_support))\n\n map_args.update({\n 'ami_id': args['ami_id'] if args['ami_id'] is not None else AWS_AMI_ID,\n 'created_by': args['created_by'] if args['created_by'] is not None else getpass.getuser(),\n 'spot_price': args['spot_price'] if args['spot_price'] is not None else None,\n 'user_data': get_script(args['bash_script'] if args['bash_script'] is not None else BASH_SCRIPT_CLEAN_02),\n 'setup_disks': 
get_script(BASH_SCRIPT_SETUP_DISKS),\n 'instance_details': instance_details,\n })\n return map_args", "def init_json_args(cls):\n defaults = cls._base_json_args()\n\n if cls.__json_args__ is None:\n cls.__json_args__ = defaults\n else:\n cls.__json_args__ = mapping_extend(defaults, cls.__json_args__)", "def get_args():\n parser = ag.ArgumentParser(description='Places cations between oxygens '\n 'according to doi://10.1261/rna.2390311')\n parser.add_argument('-f', '--file', type=ag.FileType('r'),\n help='PDB file', required=True)\n\n parser.add_argument('-o', '--output', type=ag.FileType('w+'),\n help='Output PDB file', required=True)\n\n parser.add_argument('-c', '--cation-type', type=str,\n help='Cation type: NA, MG or CA (default NA)',\n choices = ['NA', 'MG', 'CA'],\n default='NA')\n \n parser.add_argument('-a', '--acid-type', type=str,\n help='Amino Acid type: RNA or DNA (default RNA)',\n choices = ['RNA', 'DNA'],\n default='RNA')\n\n parser.add_argument('-r', '--rotation-angle', type=int,\n help='Rotation angle in degrees (default 5)',\n default = 5)\n \n parser.add_argument('-n', '--number', type=int,\n help='Number of added cations, 0 - infinite (default 0)',\n default = 0)\n \n parser.add_argument('--na-run', action='store_true', \n help='Fill with Na cations')\n \n parser.add_argument('--na-limit', type=int,\n help='Number of Na cations to be added during Na fill'\n 'run. 0 - infinite (default 0)', default = 0)\n \n get_args = parser.parse_args()\n\n args_dict = {\n 'file' : get_args.file,\n 'output' : get_args.output,\n 'cation' : get_args.cation_type,\n 'acid' : get_args.acid_type,\n 'angle' : get_args.rotation_angle,\n 'number' : get_args.number,\n 'na_run' : get_args.na_run,\n 'na_limit': get_args.na_limit}\n\n return args_dict", "def _extract_params(self, kwargs, hyperparameters):\n init_params = dict()\n fit_params = dict()\n produce_params = dict()\n\n for name, param in hyperparameters.get('fixed', dict()).items():\n if name in kwargs:\n value = kwargs.pop(name)\n\n elif 'default' in param:\n value = param['default']\n\n else:\n raise TypeError(\"{} required argument '{}' not found\".format(self.name, name))\n\n init_params[name] = value\n\n for name, param in hyperparameters.get('tunable', dict()).items():\n if name in kwargs:\n init_params[name] = kwargs.pop(name)\n\n if not isinstance(self.fit_args, str):\n fit_args = [arg['name'] for arg in self.fit_args]\n else:\n fit_args = []\n\n if not isinstance(self.produce_args, str):\n produce_args = [arg['name'] for arg in self.produce_args]\n else:\n produce_args = []\n\n for name in list(kwargs.keys()):\n if name in fit_args:\n fit_params[name] = kwargs.pop(name)\n\n elif name in produce_args:\n produce_params[name] = kwargs.pop(name)\n\n if kwargs:\n error = \"Unexpected hyperparameters '{}'\".format(', '.join(kwargs.keys()))\n raise TypeError(error)\n\n return init_params, fit_params, produce_params", "def cmd_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--image\",\n help=\"Full image path can be optionally supplied.\")\n args = parser.parse_args()\n return args", "def _get_input_args(bam_file, data, out_base, background):\n if dd.get_genome_build(data) in [\"hg19\"]:\n return [\"--PileupFile\", _create_pileup(bam_file, data, out_base, background)]\n else:\n return [\"--BamFile\", bam_file]", "def parse_arguments():\n custom_config = config.read()\n arguments = docopt(__doc__, version='Montanus %s' % __version__)\n logger.debug(custom_config)\n conf_file = arguments.get('--with-conf')\n 
if conf_file is not None:\n conf_config = config.read(conf_file)\n\n for (k, v) in conf_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(arguments)\n command_config = {\n 'templates_path': arguments.get('<templates_path>'),\n 'static_files_path': arguments.get('--with-static-files-path') \\\n if arguments.get('-with-static-files-path') is not None \\\n else arguments.get('<templates_path>'),\n 'delete_source': arguments.get('--delete'),\n 'protocol': arguments.get('--with-protocol'),\n 'domains': arguments.get('--with-domains').split(',') \\\n if arguments.get('--with-domains') is not None \\\n else None,\n 'md5_len': int(arguments.get('--with-md5-len')),\n 'md5_concat_by': arguments.get('--with-md5-concat-by')\n }\n logger.debug(command_config)\n\n for (k, v) in command_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(custom_config)\n return DictWrapper(custom_config)", "def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params", "def _base_json_args(cls):\n relationships = cls.__mapper__.relationships.keys()\n relationship_options = dict([(x, False) for x in relationships])\n\n defaults = {'relationships': relationship_options,\n 'exclude_attrs': [],\n 'include_attrs': []}\n return defaults", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. 
\"\n # \"Zip format\",\n # required=False)", "def _create_container_args(kwargs):\n # Copy over kwargs which can be copied directly\n create_kwargs = {}\n for key in copy.copy(kwargs):\n if key in RUN_CREATE_KWARGS:\n create_kwargs[key] = kwargs.pop(key)\n host_config_kwargs = {}\n for key in copy.copy(kwargs):\n if key in RUN_HOST_CONFIG_KWARGS:\n host_config_kwargs[key] = kwargs.pop(key)\n\n # Process kwargs which are split over both create and host_config\n ports = kwargs.pop('ports', {})\n if ports:\n host_config_kwargs['port_bindings'] = ports\n\n volumes = kwargs.pop('volumes', {})\n if volumes:\n host_config_kwargs['binds'] = volumes\n\n network = kwargs.pop('network', None)\n network_driver_opt = kwargs.pop('network_driver_opt', None)\n if network:\n network_configuration = {'driver_opt': network_driver_opt} \\\n if network_driver_opt else None\n\n create_kwargs['networking_config'] = {network: network_configuration}\n host_config_kwargs['network_mode'] = network\n\n # All kwargs should have been consumed by this point, so raise\n # error if any are left\n if kwargs:\n raise create_unexpected_kwargs_error('run', kwargs)\n\n create_kwargs['host_config'] = HostConfig(**host_config_kwargs)\n\n # Fill in any kwargs which need processing by create_host_config first\n port_bindings = create_kwargs['host_config'].get('PortBindings')\n if port_bindings:\n # sort to make consistent for tests\n create_kwargs['ports'] = [tuple(p.split('/', 1))\n for p in sorted(port_bindings.keys())]\n if volumes:\n if isinstance(volumes, dict):\n create_kwargs['volumes'] = [\n v.get('bind') for v in volumes.values()\n ]\n else:\n create_kwargs['volumes'] = [\n _host_volume_from_bind(v) for v in volumes\n ]\n return create_kwargs", "def read_arguments(argv):\n\tif argv[0] in ('1', '2'):\n\t\tconos_config['endpoint'] = endpoint[argv[0]]\n\telse:\n\t\tusage()\n\n\tif argv[1] in ('dev', 'test', 'int', 'prod'):\n\t\tconos_config['environment'] = argv[1]\n\t\tconos_config['sts_url'] = eval(argv[1] + '_sts_url')\n\t\tconos_config['aicuu_url'] = eval(argv[1] + '_aicuu_url')\n\telse:\n\t\tusage()\n\n\tif len(argv) == 6:\n\t\tconos_config['number_threads'] = '1'\n\telse:\n\t\tif argv[6] in ('1', '2', '3', '4', '5', '6', '7', '8'):\n\t\t\tconos_config['number_threads'] = argv[6]\n\t\telse:\n\t\t\tusage()\n\n\tconos_config['client_id'] = argv[2]\n\tconos_config['client_secret'] = argv[3]\n\tconos_config['input_file'] = argv[4]\n\tconos_config['output_file'] = argv[5]", "def cookiecutter_args(self) -> dict[str, str]:\n local_args = {\n \"add_golden\": \"y\" if self.golden_tests else \"n\",\n \"copyright_holder\": self.copyright_holder,\n \"copyright_year\": (\n self.today.strftime(\"%Y\")\n if not self.copyright_year\n else self.copyright_year\n ),\n \"github_owner\": self.github_owner,\n \"name\": self.name,\n \"slug\": self.slug,\n # The template expects the test cases in a single string separated by\n # spaces.\n \"test_cases\": \" \".join(self.test_cases),\n }\n cruft_json = self.target_dir / \".cruft.json\"\n if cruft_json.is_file():\n with open(cruft_json, \"r\", encoding=\"utf-8\") as f:\n cruft_json_data = json.load(f)\n args = cruft_json_data[\"context\"][\"cookiecutter\"]\n for k, v in local_args.items():\n args[k] = v\n else:\n args = local_args\n\n return args", "def process_cmdline_args():\n parser = argparse.ArgumentParser(description='create s3 account using s3cipher generated keys')\n subparsers = parser.add_subparsers(dest='action')\n create_bg_acc = subparsers.add_parser('CreateBGDeleteAccount', 
help='Create background delete service account')\n\n create_bg_acc.add_argument('--ldapuser', help='sgiam ldap user name', type=str, required=True)\n create_bg_acc.add_argument('--ldappasswd', help='sgiam ldap user password', type=str, required=True)\n\n args = parser.parse_args()\n\n try:\n if args.action in g_supported_ldap_action_table.keys():\n action_obj = LdapAccountAction(args.ldapuser, args.ldappasswd)\n if args.action == 'CreateBGDeleteAccount':\n action_obj.create_account(g_supported_ldap_action_table[args.action])\n\n result_dict = g_supported_ldap_action_table[args.action]\n action_obj.print_create_account_results(result_dict)\n except Exception as e:\n print(\"Exception : {}\".format(e))\n print(\"Traceback : {}\".format(traceback.format_exc()))\n parser.print_help()", "def __init__(self, *args, **kwargs):\n \n for arg in args:\n if isinstance(arg, dict):\n for key, value in arg.items():\n self[key] = value\n if hasattr(arg, \"__dict__\"):\n for key, value in arg.__dict__.items():\n self[key] = value\n\n if kwargs:\n for key, value in kwargs.items():\n self[key] = value", "def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary", "def initialize():\n\n parser = argparse.ArgumentParser(\n description='This function takes a gene count file, a gene name, and \\\n an output file as parameters, and creates a file with the \\\n sample IDs and counts for that gene.')\n parser.add_argument('-i',\n '--data',\n type=str,\n help='The file name of the dataset.',\n required=True)\n parser.add_argument('-g',\n '--gene',\n type=str,\n help='The name of the target gene.',\n required=True)\n parser.add_argument('-o',\n '--output',\n type=str,\n help='The file name of the output file.',\n required=True)\n\n args_parse = parser.parse_args()\n\n return args_parse", "def args_to_params(args: list) -> dict:\n found = {}\n\n # Setup the dictionary identifying the parameters\n found['sensor'] = args.sensor\n found['filename'] = args.filename\n found['working_space'] = args.working_space\n if args.userid:\n found['userid'] = args.userid\n\n # Note: Return an empty dict if we're missing mandatory parameters\n return found", "def get_options(cmd_args=None):\n cmd_parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n cmd_parser.add_argument(\n '-i',\n '--input_file',\n help=\"\"\"a log file to be cleaned up\"\"\",\n type=str,\n default='')\n cmd_parser.add_argument(\n '-s',\n '--salt',\n help=\"\"\"the salt for anonymizing IPs [optional, defaults to hardcoded one]\"\"\",\n type=str,\n default=salt)\n\n args = cmd_parser.parse_args(cmd_args)\n\n options = {}\n options['input_file'] = args.input_file\n options['salt'] = args.salt\n\n return options", "def get_args_from_console(args):\n return {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": args.wastebasket_path\n }", "def _kwargs(self):\n dict = DAG._kwargs(self) \n if (self.job): \n dict[\"inputpaths\"] = self.job.inputpaths\n 
dict[\"outputpath\"] = self.job.outputpath\n dict[\"job\"] = \"%s()\" % self.job.__class__.__name__\n return dict", "def get_args():\n\n parser = argparse.ArgumentParser(description=\"Get DC, Clusters, Hosts and VM in JSON.\")\n parser.add_argument('-H', '--host', nargs=1, required=True, help='The vCenter to connect to',\n dest='host', type=str)\n parser.add_argument('-p', '--password', nargs=1, required=False,\n help='The password with which to connect to the VC. If not specified, the user is prompted at runtime for a password',\n dest='password', type=str)\n parser.add_argument('-u', '--user', nargs=1, required=True, help='The username with which to connect to the host',\n dest='username', type=str)\n args = parser.parse_args()\n return args", "def get_args():\n args_obj = None\n parser = argparse.ArgumentParser(description='This tool is for installing mellanox-os')\n parser.add_argument('-s', '--switch-name', help='Switch name to connect', required=True)\n parser.add_argument('-u', '--switch-username', help='Switch name to connect', default='admin')\n parser.add_argument('-sp', '--switch-password', help='Switch name to connect', default='admin')\n parser.add_argument('-i', '--switch_ip', help='Switch ip to connect')\n parser.add_argument('-b', '--install', action='store_true', help='Install mellanox-os')\n parser.add_argument('-d', '--fetch', action='store_true', help='fetch mellanox-os')\n parser.add_argument('-f', '--force', action='store_true', help='force fetch and install')\n\n parser.add_argument('-l', '--image-path', help='image path location')\n parser.add_argument('-n', '--image-name', help='image name')\n\n parser.add_argument('-m', '--master-ip', help='master ip to fetch the image from')\n parser.add_argument('-p', '--master-password', help='master password to connect from the switch')\n parser.add_argument('-v', '--verbosity', help='increase output verbosity')\n\n try:\n args_obj = parser.parse_args()\n if args_obj.install is True and args_obj.image_name is None:\n parser.error('--install can only be used when image-path and image-name are provided.')\n if args_obj.fetch is True and args_obj.master_ip is None or args_obj.master_password is None or\\\n args_obj.image_path is None:\n parser.error('--fetch can only be used when master-ip and master-password are provided.')\n\n except IOError as exc:\n parser.error(str(exc))\n return args_obj", "def _get_constructor_parameters(self) -> Dict[str, Any]:\n return dict(\n obs_space=self.obs_space,\n action_space=self.action_space,\n scale_imgs=self.scale_imgs,\n )", "def _parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--version', help=argparse.SUPPRESS)\n parser.add_argument('--no-make-lib', action='store_true',\n help='skip building dumps while creating references')\n parser.add_argument('-libs', action='append',\n help='libs to create references for')\n parser.add_argument('-products', action='append',\n help='products to create references for')\n parser.add_argument('--build-variant', default='userdebug',\n help='build variant to create references for')\n parser.add_argument('--compress', action='store_true',\n help=argparse.SUPPRESS)\n parser.add_argument('-ref-dump-dir',\n help='directory to copy reference abi dumps into')\n\n args = parser.parse_args()\n\n if args.version is not None:\n parser.error('--version is deprecated. Please specify the version in '\n 'the reference dump directory path. 
e.g., '\n '-ref-dump-dir prebuilts/abi-dumps/platform/current/64')\n\n if args.compress:\n parser.error(\"Compressed reference dumps are deprecated.\")\n\n if args.libs:\n if any(lib_name.endswith(SOURCE_ABI_DUMP_EXT_END) or\n lib_name.endswith(SO_EXT) for lib_name in args.libs):\n parser.error('-libs should be followed by a base name without '\n 'file extension.')\n\n if args.ref_dump_dir and not args.libs:\n parser.error('-libs must be given if -ref-dump-dir is given.')\n\n if args.products is None:\n # If `args.products` is unspecified, generate reference ABI dumps for\n # all products.\n args.products = PRODUCTS_DEFAULT\n\n return args", "def _parse_create_args(self, args):\r\n size = args['--size']\r\n location = args['--datacenter']\r\n return int(size), str(location)", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\"\"\n Generates json files with all the combinations of\n hyperparameter values from a configuratio file.\n \"\"\")\n\n parser.add_argument('outdir', help='output directory')\n parser.add_argument('config', help='configuration file')\n\n return parser.parse_args()", "def prepare_args(config, bootstrap):\n config = copy.deepcopy(config)\n environ = dict(copy.deepcopy(os.environ))\n\n data = {'env': bootstrap['env'],\n 'pip': pip_cmd(bootstrap['env'], '', return_path=True),\n 'requirements': bootstrap['requirements']}\n environ.update(data)\n\n if isinstance(config, string_types):\n return config.format(**environ)\n\n for key, value in iteritems(config):\n if not isinstance(value, string_types):\n continue\n config[key] = value.format(**environ)\n\n return config_to_args(config)", "def get_cli_arguments(self):\n pass", "def get_argdefaults(factory, num_skipped=0):\n args, defaults = _getargspec(factory)\n\n if defaults is not None:\n num_without_defaults = len(args) - len(defaults)\n default_values = (NO_DEFAULT,) * num_without_defaults + defaults\n else:\n default_values = (NO_DEFAULT,) * len(args)\n\n return dict(zip(args, default_values)[num_skipped:])", "def _get_init_args(self):\n signature = inspect.signature(self.__init__)\n parameters = signature.parameters\n args = [arg for arg, p in parameters.items()\n if p.kind is p.POSITIONAL_OR_KEYWORD]\n\n return {arg: getattr(self, arg) for arg in args if arg != 'self'}", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. 
It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. 
If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. 
If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments", "def init(kwargs: Dict[str, str]):\n\n return {key: value for key, value in kwargs.items() if '-' not in key}", "def manage_params(args):\n # Socrata API\n with open(\"secret/builtby-socrata.yaml\", 'r') as f:\n try:\n socrata_api_credentials = yaml.load(f)\n except yaml.YAMLError as exc:\n print(exc)\n\n socrata_app_token = socrata_api_credentials['app_token']\n\n # base params\n params = {\n '$$app_token': socrata_app_token\n }\n # remove null attributes\n args = {k: v for k, v in args.items() if v is not None}\n # add args to params\n params.update(args) # inplace\n\n return params", "def Get_Arguments():\n parser = argparse.ArgumentParser(description=\"Adds batch, species, subspecies \"\n \"columns to popmap file for summarizing ExDFOIL output\",\n add_help=False)\n\n required_args = parser.add_argument_group(\"Required Arguments\")\n optional_args = parser.add_argument_group(\"Optional Arguments\")\n\n ## Required Arguments\n required_args.add_argument(\"-p\", \"--popmap\",\n type=str,\n required=True,\n help=\"String; Tab-separated popmap file: indID\\tpopID\")\n\n ## Optional Arguments\n optional_args.add_argument(\"-b\", \"--batch\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing batchIDs\")\n optional_args.add_argument(\"-S\", \"--species\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing speciesIDs\")\n optional_args.add_argument(\"-s\", \"--subspecies\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing subspeciesIDs\")\n optional_args.add_argument(\"-o\", \"--outfile\",\n type=str,\n required=False,\n default=\"mysampleinfo.txt\",\n nargs=\"?\",\n help=\"Specify output filename; default=mysampleinfo.txt\")\n optional_args.add_argument(\"-h\", \"--help\",\n action=\"help\",\n help=\"Displays this help menu\")\n\n\n args = parser.parse_args()\n\n return args", "def _parse_args():\n\n parser = argparse.ArgumentParser()\n required_args = ['repo', 'commit', 'token', 'state']\n optional_args = ['context', 'description', 'url']\n for arg in required_args:\n parser.add_argument('--' + arg, required=True)\n for arg in optional_args:\n parser.add_argument('--' + arg)\n args = parser.parse_args()\n\n # resolve env vars, possibly to None (only allowed for optional args)\n for arg in required_args + optional_args:\n val = getattr(args, arg)\n if val is not None:\n if val.startswith('env:'):\n new_val = os.environ.get(val[4:])\n if new_val is None and arg in required_args:\n parser.error(\n \"Parameter '%s' is required, but the given \"\n \"environment variable '%s' is missing.\" % (\n arg, val[4:]))\n setattr(args, arg, new_val)\n # we allow users to pass \"\" for optional vars to mean None so that\n # they don't have to resort to e.g. 
eval\n elif val == \"\":\n if arg in required_args:\n parser.error(\n \"Parameter '%s' is required, but the given \"\n \"argument is empty.\" % arg)\n setattr(args, arg, None)\n\n return args", "def create_arg_config(environment, region, template, parameters):\r\n raw_config = {\r\n 'Environment': environment,\r\n 'Region': region\r\n }\r\n if template:\r\n raw_config['Template'] = template\r\n if parameters:\r\n raw_config['Parameters'] = dict(parameters)\r\n return Config(raw_config)", "def create_training_args(self, input_dict, output_dict, exec_properties,\n executor_class_path, training_inputs,\n job_id) -> Dict[Text, Any]:\n pass", "def universal_args(self):\n args = list(self.BASIC_ARGS)\n # Set ATF to be the bios\n args += [\"-bios\", \"%s/bl1.bin\" % self.config.atf]\n\n if self.config.linux:\n args += [\n \"-kernel\",\n \"%s/arch/arm64/boot/Image\" % self.config.linux\n ]\n args += [\"-append\", self.LINUX_ARGS]\n\n if self.config.android:\n args += self.android_drives_args()\n\n return args", "def _parse_arguments(kwargs, argv):\n retval = {}\n errors = []\n for arg in argv:\n retval[arg['arg_name']] = kwargs.get(arg['arg_name'], None)\n if retval[arg['arg_name']]:\n try:\n if arg['convert_func'] is not None:\n retval[arg['arg_name']] = arg['convert_func'](retval[arg['arg_name']])\n except ValueError:\n errors.append({'status': '400',\n 'detail': 'Error in argument %s: %s' % (arg['arg_name'], retval[arg['arg_name']])})\n if errors:\n raise ApplicationException({'errors': errors}, 400)\n return retval", "def crude_arg_parser(args=sys.argv):\n args_dict = {}\n key = None\n for e in args[1:]:\n if e[:2] == '--':\n if key:\n args_dict[key] = True # Switch arg\n key = e[2:]\n elif key:\n args_dict[key] = e\n key = None\n\n return args_dict", "def args(self, value):\n # obtener la linea de comandos convertida a dict, eliminando algunos\n self._args = self.clean_command_line(value)\n\n # obtener el archivo de configuracion\n config = self.get_config()\n\n # Cliente actual, de los parametros, este siempre tiene precedencia\n client = self._args.get('client')\n\n # Fallback lo saco de la configuracion, y si tampoco esta es un error\n if not client:\n client = config.get('client')\n self._args['client'] = client\n\n # si aca no tengo definido el cliente termino con error\n if not client:\n msg.err('Need -c option (client name). Process aborted')\n\n # obtener la configuracion para el cliente actual.\n client_config = config.get(client, {})\n\n # Mezclo argumentos de linea de comandos con configuracion\n # la linea de comandos tiene precedencia\n for item in client_config or []:\n if item not in self._args:\n self._args[item] = client_config.get(item)\n\n # agregar valores por defecto si no estan definidos\n self.add_default_values()\n\n # si aca no tengo definido la aplicacion default termino con error\n if not self._args.get('defapp'):\n msg.err('Need --defapp option (default application). 
'\n 'Process aborted')\n\n self.save_config()", "def toargs(context, schema, data):\n data = dict(data)\n args = {}\n for name, field in schema.namesAndDescriptions(True):\n field = field.bind(context)\n n = name\n if n.endswith('_') and iskeyword(n[:-1]):\n n = n[:-1]\n\n s = data.get(n, data)\n if s is not data:\n s = str(s)\n del data[n]\n\n try:\n args[str(name)] = field.from_unicode(s)\n except ValidationError as v:\n reraise(ConfigurationError('Invalid value for', n, str(v)),\n None, sys.exc_info()[2])\n elif field.required:\n # if the default is valid, we can use that:\n default = field.default\n try:\n field.validate(default)\n except ValidationError:\n raise ConfigurationError('Missing parameter:', n)\n args[str(name)] = default\n\n if data:\n # we had data left over\n try:\n keyword_arguments = schema.getTaggedValue('keyword_arguments')\n except KeyError:\n keyword_arguments = False\n if not keyword_arguments:\n raise ConfigurationError('Unrecognized parameters:', *data)\n\n for name in data:\n args[str(name)] = data[name]\n\n return args", "def get_merged_args(args):\n config_dict = load_config(args.config)\n\n args_dict = {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": args.wastebasket_path\n }\n\n for arg, value in args_dict.iteritems():\n if not value:\n args_dict[arg] = config_dict[arg]\n\n if args_dict[\"cleaning_policy\"] == POLICY:\n args_dict[\"cleaning_policy\"] = config_dict[\"cleaning_policy\"]\n\n if args_dict[\"storage_time\"] == STORAGE_TIME:\n args_dict[\"storage_time\"] = config_dict[\"storage_time\"]\n\n if args_dict[\"max_size\"] == MAX_SIZE:\n args_dict[\"max_size\"] = config_dict[\"max_size\"]\n\n return args_dict", "def init(args: Optional[List[bytes]] = None) -> None:\n warnings.warn(_deprecation_warning(), FutureWarning)\n parsed = {}\n if args:\n for arg in args:\n kv = arg.decode().split('=')\n if len(kv) == 2:\n parsed[kv[0]] = kv[1]\n collective.init(**parsed)", "def defaultargs(options):\n config = {}\n for longname, default, _ in options:\n config[longname] = default\n return config", "def init_crypto_args(**kwargs):\n d = {'algorithm': 'AES',\n 'mode': 'CBC',\n 'keysize': 256,\n 'pbkdf': 'PBKDF2HMAC',\n 'hash': 'SHA256',\n 'length': int(256/8),\n 'iterations': 100,\n 'salt': str(base64.b64encode(os.urandom(RANDOM_BYTES)), encoding='utf-8')}\n return {**d, **kwargs}", "def _CommonArgs(parser):\n image_args = parser.add_mutually_exclusive_group(required=True)\n image_building_args = image_args.add_argument_group()\n parser.add_argument(\n 'template_file_gcs_path',\n metavar='TEMPLATE_FILE_GCS_PATH',\n help=('The Google Cloud Storage location of the flex template file.'\n 'Overrides if file already exists.'),\n type=arg_parsers.RegexpValidator(r'^gs://.*',\n 'Must begin with \\'gs://\\''))\n\n image_args.add_argument(\n '--image',\n help=('Path to the any image registry location of the prebuilt flex '\n 'template image.'))\n\n parser.add_argument(\n '--sdk-language',\n help=('SDK language of the flex template job.'),\n choices=['JAVA', 'PYTHON'],\n required=True)\n\n parser.add_argument(\n '--metadata-file',\n help='Local path to the metadata json file for the flex template.',\n 
type=arg_parsers.FileContents())\n\n parser.add_argument(\n '--print-only',\n help=('Prints the container spec to stdout. Does not save in '\n 'Google Cloud Storage.'),\n default=False,\n action=actions.StoreBooleanProperty(\n properties.VALUES.dataflow.print_only))\n\n image_building_args.add_argument(\n '--image-gcr-path',\n help=('The Google Container Registry location to store the flex '\n 'template image to be built.'),\n type=arg_parsers.RegexpValidator(\n r'^(.*\\.){0,1}gcr.io/.*',\n ('Must begin with \\'[multi-region.]gcr.io/\\'. Please check '\n 'https://cloud.google.com/container-registry/docs/overview '\n 'for available multi-regions')),\n required=True)\n\n image_building_args.add_argument(\n '--jar',\n metavar='JAR',\n type=arg_parsers.ArgList(),\n action=arg_parsers.UpdateAction,\n help=('Local path to your dataflow pipeline jar file and all their '\n 'dependent jar files required for the flex template classpath. '\n 'You can pass them as a comma separated list or repeat '\n 'individually with --jar flag. Ex: --jar=\"code.jar,dep.jar\" or '\n '--jar code.jar, --jar dep.jar.'),\n required=True)\n\n image_building_args.add_argument(\n '--flex-template-base-image',\n help=('Flex template base image to be used while building the '\n 'container image. Allowed choices are JAVA8, JAVA11 or gcr.io '\n 'path of the specific version of the base image. For JAVA8 and '\n 'JAVA11 option, we use the latest base image version to build '\n 'the container. You can also provide a specific version from '\n 'this link https://gcr.io/dataflow-templates-base/'),\n type=arg_parsers.RegexpValidator(\n r'^JAVA11$|^JAVA8$|^gcr.io/.*',\n 'Must be JAVA11 or JAVA8 or begin with \\'gcr.io/\\''),\n required=True)\n\n image_building_args.add_argument(\n '--env',\n metavar='ENV',\n type=arg_parsers.ArgDict(),\n action=arg_parsers.UpdateAction,\n help=\n ('Environment variables to create for the Dockerfile. '\n 'You can pass them as a comma separated list or repeat individually '\n 'with --env flag. Ex: --env=\"A=B,C=D\" or --env A=B, --env C=D.'\n 'You can find the list of supported environment variables in this '\n 'link. 
https://cloud.google.com/dataflow/docs/guides/templates/'\n 'troubleshooting-flex-templates'\n '#setting_required_dockerfile_environment_variables'),\n required=True)", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Simple Jarvice CLI\",\n add_help=False)\n auth_group = parser.add_argument_group('auth', description='Configuration')\n auth_group.add_argument('-username', help='Jarvice username')\n auth_group.add_argument('-apikey', help='Jarvice API key')\n auth_group.add_argument('-apiurl', help='Jarvice API URL',\n default='https://api.jarvice.com')\n auth_group.add_argument('-v', help='loglevel',\n choices=['INFO', 'WARN', 'DEBUG', 'CRITICAL'],\n dest='loglevel', default='CRITICAL')\n auth_group.add_argument(\n 'command',\n choices=['connect', 'submit', 'info', 'status',\n 'action', 'terminate', 'shutdown', 'jobs',\n 'output', 'tail', 'apps', 'machines', 'summary',\n 'download', 'upload', 'wait_for', 'shutdown_all',\n 'terminate_all', 'ls'])\n\n known, unknown = parser.parse_known_args()\n return known, unknown, parser", "def __clsfn_args_kwargs(config, key, base_module=None, args=None, kwargs=None):\n logger = logging.getLogger('pytorch_lm.utils.config')\n logger.config('config: {}, key: {}, base_module: {}, args: {}, kwargs: {}'.format(\n config, key, base_module, args, kwargs))\n args = args or []\n kwargs = kwargs or {}\n module_name, _, object_name = config[key].rpartition('.')\n if base_module and not module_name:\n module = importlib.import_module(base_module)\n else:\n module = importlib.import_module(module_name)\n obj = getattr(module, object_name)\n args += config.get('args', [])\n kwargs.update(**config.get('kwargs', {}))\n return obj, args, kwargs", "def _set_default_args(self):\n self._parser.add_argument(\"username\")\n self._parser.add_argument(\"password\")\n self._parser.add_argument(\n \"--start\",\n help=\"Start date for the scraper in iso format, eg: 2017-11-19\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--end\",\n help=\"End date for the scraper in iso format\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--skip-delete\",\n help=\"Delete the scraper folder in /tmp after run\",\n action=\"store_true\",\n )", "def get_kwargs(self):\n return {}", "def get_kwargs_raw(argv):\n\tinfile = None\n\tparams = []\n\n\tif len(argv) > 1:\n\t\tinfile = argv[1]\n\n\ti = 1\n\twhile i < (len(argv)-2):\n\t\tparams.append({\n\t\t\t'x_keys':argv[i+1].split(','),\n\t\t\t'y_keys':argv[i+2].split(','),\n\t\t})\n\t\ti = i + 2\n\n\treturn [infile, params]", "def set_arg_defaults(task: \"Task\", args: Tuple, kwargs: dict) -> Tuple[Tuple, dict]:\n # Start with given kwargs.\n kwargs2 = dict(kwargs)\n\n sig = task.signature\n for i, param in enumerate(sig.parameters.values()):\n if i < len(args):\n # User already specified this arg in args.\n continue\n\n elif param.name in kwargs2:\n # User already specificed this arg in kwargs.\n continue\n\n elif param.default != param.empty:\n # Default should be used.\n kwargs2[param.name] = param.default\n return args, kwargs2", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. 
Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def __init__(self, **kwargs):\n\n self.opts = {}\n self.opts.update(kwargs)\n self._v_registry = {}", "def get_args() -> Namespace:\n\n parser = ArgumentParser()\n\n parser.add_argument(\n \"command_name\",\n type=str,\n help=\"Command to execute.\")\n\n parser.add_argument(\n \"-pl\",\n \"--password-length\",\n type=int,\n default=10,\n help=\"The length of the password\")\n\n parser.add_argument(\n \"-np\",\n \"--number-of-passwords\",\n type=int,\n help=\"The number of passwords\")\n\n parser.add_argument(\n \"-xn\",\n \"--exclude-numbers\",\n action=\"store_true\",\n help=\"Do not include numbers in the generated password\"\n )\n\n parser.add_argument(\n \"-xl\",\n \"--exclude-letters\",\n action=\"store_true\",\n help=\"Do not include letters in the generated password\"\n )\n\n parser.add_argument(\n \"-xlc\",\n \"--exclude-lowercase-letters\",\n action=\"store_true\",\n help=\"Do not include lowercase letters in the generated password\"\n )\n\n parser.add_argument(\n \"-xuc\",\n \"--exclude-uppercase-letters\",\n action=\"store_true\",\n help=\"Do not include uppercase letters in the generated password\"\n )\n\n parser.add_argument(\n \"-xs\",\n \"--exclude-symbols\",\n action=\"store_true\",\n help=\"do not include symbols in the generated password\"\n )\n\n return parser.parse_args()", "def build(cls, key: str, **kwargs):\n\n def add(parser):\n return cls.add_arguments(key, parser)\n\n kwargs = {f\"{key}_{cls.alias}_\" + k: v for k, v in kwargs.items()}\n args = argparse.Namespace(**kwargs)\n args = fill_missing_args(args, add)\n return cls(key, args)", "def create_parameters_description():\n description = OrderedDict()\n description['GeneralArguments'] = [\n {\n 'main_argument_name': '--config-file',\n 'argument_name_options': ['--config'],\n 'parameter_name': 'config_file',\n 'help': \"\"\"A json-encoded configuration file, in which one can specify the parameters\n for all detectors in use as well as some general parameters for the whole run.\n The encoded object should therefore be a dictionary,\n with possible top-level keys 'GeneralArguments' (general parameters, not relevant\n to a detector class), 'SaccadeDetector', 'BlinkDetector', 'FixationDetector'\n and 'SmoothPursuitDetector'.\n\n The value for each of the present keys should in turn be a dictionary with keys\n identical to the longest argument names below, without the eye movement name prefix.\n An example (and equivalent to default parameters) configuration file is provided\n in default_parameters.conf.json and includes all possible keys.\n\n In your custom configuration file you do not have to specify any the parameter values,\n missing keys will be considered to have the default value.\n\n For default values, you can consult the respective classes' __init__ methods in\n saccade_detector.py, blink_detector.py, fixation_detector.py and sp_detector.py.\n\n\n Values given through the console interface override the ones in the config file.\"\"\",\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-folder',\n 'argument_name_options': ['--in'],\n 'parameter_name': 'input_folder',\n 'help': 'From where to load the gaze points data. If absent, must be present in --config-file file. '\n 'This folder is assumed to have subfolders that correspond to videos, for which recordings '\n 'were made. 
Each such subdirectory should contain gaze files (one file per observer).',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--gaze-file-pattern',\n 'argument_name_options': ['--pattern'],\n 'parameter_name': 'gaze_file_pattern',\n 'help': 'Will look for such files in all subdirectories of --input-folder. '\n 'For GazeCom, \\'*.arff\\' is a recommended value (or \\'*.coord\\', if dealing with original dataset files). '\n 'One can use this parameter to match some name pattern as well (not just the file extension), '\n 'for example with \\'*_needed_files_*.arff\\'. \\n'\n 'If no wildcard symbol is found in the provided string, it is assumed to be just the file name '\n 'suffix, so it will be prepended with a wildcard symbol (i.e. \".coord\" will become \"*.coord\").',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-data-type',\n 'argument_name_options': ['--type'],\n 'parameter_name': 'input_data_type',\n 'help': 'Type of data loader to use (if not specified, will try to detect automatically)',\n 'kwargs': {'choices': ['DSF', 'ARFF', 'labelled ARFF']}\n },\n {\n 'main_argument_name': '--verbose',\n 'argument_name_options': ['-v'],\n 'parameter_name': 'verbose',\n 'default': None,\n 'help': 'Whether to output some information about the progress of the run to STDERR',\n 'kwargs': {'action': 'store_const', 'const': True} # only like this can support the default of None\n # (not to override the config all the time\n # with a missing value)\n },\n {\n 'main_argument_name': '--movies',\n 'argument_name_options': ['-m'],\n 'parameter_name': 'movies',\n 'help': 'Which movies out of the input folder to use (might be useful for train/test split). '\n 'The gaze data is supposed to be put under respective directories in the input folder. '\n 'If none are given, all available ones are used.',\n 'kwargs': {'nargs': '+', 'default': None}\n },\n {\n 'main_argument_name': '--output-folder',\n 'argument_name_options': ['--out'],\n 'parameter_name': 'output_folder',\n 'help': 'Where to output the resulting labelled data (if empty, will create a new temporary directory)',\n 'kwargs': {}\n },\n ]\n\n description['SaccadeDetector'] = [\n {\n 'main_argument_name': '--tolerance',\n 'argument_name_options': ['--tol'],\n 'parameter_name': 'tolerance',\n 'help': 'The relative size of the area outside the screen that is still considered to be legal',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-fast-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-fast'],\n 'parameter_name': 'threshold_onset_fast_degree_per_sec',\n 'help': 'Threshold for initialization of saccade detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-slow-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-slow'],\n 'parameter_name': 'threshold_onset_slow_degree_per_sec',\n 'help': 'A slower threshold for saccade onset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-offset-degree-per-sec',\n 'argument_name_options': ['--threshold-offset'],\n 'parameter_name': 'threshold_offset_degree_per_sec',\n 'help': 'Threshold for saccade offset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-speed-degree-per-sec',\n 'argument_name_options': ['--max-speed'],\n 'parameter_name': 'max_speed_degree_per_sec',\n 'help': 'Maximum speed of saccadic eye movements',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': 
'--min-duration-microsec',\n 'argument_name_options': ['--min-duration'],\n 'parameter_name': 'min_duration_microsec',\n 'help': 'Minimal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-duration-microsec',\n 'argument_name_options': ['--max-duration'],\n 'parameter_name': 'max_duration_microsec',\n 'help': 'Maximal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--velocity-integral-interval-microsec',\n 'argument_name_options': ['--velocity-integral-interval'],\n 'parameter_name': 'velocity_integral_interval_microsec',\n 'help': 'Interval duration, over which to integrate velocity computation.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['BlinkDetector'] = [\n {\n 'main_argument_name': '--max-distance-to-saccade-microsec',\n 'argument_name_options': ['--max-distance-to-saccade'],\n 'parameter_name': 'max_distance_to_saccade_microsec',\n 'help': 'Threshold for distance from a definite blink to a nearby saccade, which will be marked as blink '\n 'as well.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['FixationDetector'] = [\n {\n 'main_argument_name': '--prefiltering-interval-spread-threshold-degrees',\n 'argument_name_options': ['--prefiltering-interval-spread-threshold'],\n 'parameter_name': 'prefiltering_interval_spread_threshold_degrees',\n 'help': 'All the intersaccadic intervals shorter than this will be deemed fixations',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-sp-duration-microsec',\n 'argument_name_options': ['--min-sp-duration'],\n 'parameter_name': 'min_sp_duration_microsec',\n 'help': 'Minimal duration of a potential SP candidate (fast-moving samples shorter than this threshold '\n 'are labelled as noise)',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-width-microsec',\n 'argument_name_options': ['--sliding-window-width'],\n 'parameter_name': 'sliding_window_width_microsec',\n 'help': 'Sliding window for coordinates smoothing',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--normalization-sliding-window-size-samples',\n 'argument_name_options': ['--normalization-sliding-window'],\n 'parameter_name': 'normalization_sliding_window_size_samples',\n 'help': 'A moving average sliding window size (to normalize the data)',\n 'kwargs': {'type': int}\n },\n {\n 'main_argument_name': '--speed-threshold-degrees-per-sec',\n 'argument_name_options': ['--speed-threshold'],\n 'parameter_name': 'speed_threshold_degrees_per_sec',\n 'help': 'Biggest plausible speed for a noisy fixation',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-criterion',\n 'argument_name_options': ['--sliding-window'],\n 'parameter_name': 'sliding_window_criterion',\n 'help': 'Defines the way we check the samples with the sliding_window_criterion threshold: '\n 'either compute the average speed in the current window, or get the spread of '\n 'the gaze points (i.e. 
biggest XY bounding box side), divided by the duration',\n 'kwargs': {'choices': ['speed', 'spread']}\n },\n {\n 'main_argument_name': '--intersaccadic-interval-duration-threshold-microsec',\n 'argument_name_options': ['--intersaccadic-interval-duration-threshold'],\n 'parameter_name': 'intersaccadic_interval_duration_threshold_microsec',\n 'help': 'Minimal size of the intersaccadic interval to apply the step with the moving average analysis',\n 'kwargs': {'type': float}\n },\n ]\n\n description['SmoothPursuitDetector'] = [\n # a mutually exclusive group\n [\n {\n 'main_argument_name': '--min-pts',\n 'argument_name_options': [],\n 'parameter_name': 'min_pts',\n 'soft_type': int,\n 'help': 'An integer indicating the minimum number of points required to form a core point\\'s '\n 'neighbourhood, or a string \\'num_observers\\' (meaning that the actual number of observers '\n 'for each movie will be substituted, depending on the data set provided).\\n'\n 'This option is mutually exclusive with --min-observers.',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--min-observers',\n 'argument_name_options': [],\n 'parameter_name': 'min_observers',\n # first try casting to int, then to float (since int cast will fail for a float)\n 'soft_type': [int, float],\n 'help': 'Either a floating point in [0.0; 1.0] range (indicating the share of all the present '\n 'observers per movie) or int [2; +\\inf) (indicating the absolute threshold for '\n 'observer count in the core point\\'s neighbourhood).\\n'\n 'This option is mutually exclusive with --min-pts.',\n 'kwargs': {}\n }\n ],\n {\n 'main_argument_name': '--eps-deg',\n 'argument_name_options': ['--eps'],\n 'parameter_name': 'eps_deg',\n 'help': 'Spatial Euclidean distance threshold that defines the neighbourhood in the XY-plane',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--time-slice-microsec',\n 'argument_name_options': ['--time-slice'],\n 'parameter_name': 'time_slice_microsec',\n 'help': 'Width of the time slice that defines the size of the neighbourhood on the time axis.',\n 'kwargs': {'type': float}\n },\n ]\n\n return description", "def _build_provided_kwargs_dict( # pylint: disable=R0914\n host: str,\n privilege_levels: Optional[Dict[str, PrivilegeLevel]],\n default_desired_privilege_level: Optional[str],\n port: Optional[int],\n auth_username: Optional[str],\n auth_password: Optional[str],\n auth_private_key: Optional[str],\n auth_private_key_passphrase: Optional[str],\n auth_strict_key: Optional[bool],\n auth_bypass: Optional[bool],\n timeout_socket: Optional[float],\n timeout_transport: Optional[float],\n timeout_ops: Optional[float],\n comms_return_char: Optional[str],\n ssh_config_file: Optional[Union[str, bool]],\n ssh_known_hosts_file: Optional[Union[str, bool]],\n on_init: Optional[Callable[..., Any]],\n on_open: Optional[Callable[..., Any]],\n on_close: Optional[Callable[..., Any]],\n transport: Optional[str],\n transport_options: Optional[Dict[str, Any]],\n channel_log: Optional[Union[str, bool, BytesIO]],\n channel_log_mode: Optional[str],\n channel_lock: Optional[bool],\n logging_uid: Optional[str],\n auth_secondary: Optional[str],\n failed_when_contains: Optional[List[str]],\n textfsm_platform: Optional[str],\n genie_platform: Optional[str],\n **kwargs: Dict[Any, Any],\n) -> Dict[str, Any]:\n # dict of all args coming from the factories\n _provided_args: Dict[str, Any] = {\n \"host\": host,\n \"privilege_levels\": privilege_levels,\n \"default_desired_privilege_level\": default_desired_privilege_level,\n 
\"port\": port,\n \"auth_username\": auth_username,\n \"auth_password\": auth_password,\n \"auth_private_key\": auth_private_key,\n \"auth_private_key_passphrase\": auth_private_key_passphrase,\n \"auth_strict_key\": auth_strict_key,\n \"auth_bypass\": auth_bypass,\n \"timeout_socket\": timeout_socket,\n \"timeout_transport\": timeout_transport,\n \"timeout_ops\": timeout_ops,\n \"comms_return_char\": comms_return_char,\n \"ssh_config_file\": ssh_config_file,\n \"ssh_known_hosts_file\": ssh_known_hosts_file,\n \"on_init\": on_init,\n \"on_open\": on_open,\n \"on_close\": on_close,\n \"transport\": transport,\n \"transport_options\": transport_options,\n \"channel_log\": channel_log,\n \"channel_log_mode\": channel_log_mode,\n \"channel_lock\": channel_lock,\n \"logging_uid\": logging_uid,\n \"auth_secondary\": auth_secondary,\n \"failed_when_contains\": failed_when_contains,\n \"textfsm_platform\": textfsm_platform,\n \"genie_platform\": genie_platform,\n }\n\n # add back in the None/False args\n _provided_args = {key: value for key, value in _provided_args.items() if value is not None}\n\n # merge in any kwargs that maybe need to get passed down\n return {**_provided_args, **kwargs}", "def get_arguments():\n # Parsing arguments\n parser = argparse.ArgumentParser(description=__doc__, usage=\n \"{0} -h (see also \"\n \"ftp://ftp.ncbi.nih.gov/pub/taxonomy/)\"\n .format(sys.argv[0]))\n parser.add_argument('-i', dest='blast_output_file', type=isfile,\n required=True, help=\"blast_output_file\")\n parser.add_argument('-d', dest='taxadb_file', type=isfile,\n required=True, help=\"Taxadb file\")\n parser.add_argument('-o', dest='taxonomy_file', type=str, required=True,\n help=\"Output taxonomy_file\")\n return parser.parse_args()", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['base'] = self.base\n return paramDict", "def _create_param_dict(self, func_args):\n for i, a in enumerate(func_args):\n self.fn.args[i].name = str(a)\n self.param_dict[a] = self.fn.args[i]", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('password' , type=bytearray)\n parser.add_argument('authenticator' , type=bytearray)\n parser.add_argument('encrypted_password' , type=bytearray)\n\n return parser.parse_args()", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict", "def get_args():\n parser = ArgumentParser(description='main interface to provision system')\n parser.add_argument('--region-list', help='list of regions for provisioning purposes',\n required=True, nargs='+')\n parser.add_argument('--outfile', help='file to save region secrets to', required=True)\n args = parser.parse_args()\n return args.region_list, args.outfile", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"script for downloading and merging log files from S3 for particular time period\")\n parser.add_argument(\"-s\", \n \"--startdate\", \n help=\"start date in format YYYYMMDD\", \n required=True, 
\n type=valid_date)\n parser.add_argument(\"-e\", \"--enddate\", \n help=\"end date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-f\", \n \"--file\", \n help=\"destination file\", \n required=True)\n parser.add_argument( \"-c\", \"--config\",\n default=\"/Users/samarius/.get_analytics_log.config.json\",\n help=\"configuration file path\")\n\n\n try:\n args = parser.parse_args()\n return args\n except Exception as e:\n print \"can't parse command line args: {}\".format(repr(e))\n raise", "def _create_instance_dict(**kwargs):\n inst = {}\n # NOTE(jk0): If an integer is passed as the image_ref, the image\n # service will use the default image service (in this case, the fake).\n inst['image_ref'] = '1'\n inst['reservation_id'] = 'r-fakeres'\n inst['user_id'] = kwargs.get('user_id', 'admin')\n inst['project_id'] = kwargs.get('project_id', 'fake')\n inst['instance_type_id'] = '1'\n inst['host'] = kwargs.get('host', 'dummy')\n inst['vcpus'] = kwargs.get('vcpus', 1)\n inst['memory_mb'] = kwargs.get('memory_mb', 20)\n inst['local_gb'] = kwargs.get('local_gb', 30)\n inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)\n inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)\n inst['task_state'] = kwargs.get('task_state', None)\n inst['availability_zone'] = kwargs.get('availability_zone', None)\n inst['ami_launch_index'] = 0\n inst['launched_on'] = kwargs.get('launched_on', 'dummy')\n return inst", "def pre_process(self, **kwargs):\n if 'skip' in kwargs and kwargs['skip'] is True:\n self.skip_prompt = True\n # create app directory\n self.create_app_dir()\n # generate additional params\n additional = kwargs.get('additional')\n params = self.default_additional\n if additional:\n for a in additional:\n idx = a.find('=')\n if idx > 0:\n params[a[0:idx]] = a[idx + 1:]\n kwargs['additional_params'] = params\n return kwargs", "def parse_args():\n\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(dest='operation',\n help='Run AtPKI {command} -h for additional help')\n\n parse_bin_parser = subparsers.add_parser(\"parse_bin\",\n help=\"parse generated PKI bin\")\n parse_bin_parser.add_argument(\"--bin_file\", \"-b\", default=\"PKI.bin\",\n help=\"bin_file which need to be parsed\")\n parse_bin_parser.add_argument(\"--output_path\", \"-o\",\n help=\"output path of parsed bin file in from_bytes mode\")\n\n generate_bin_parser = subparsers.add_parser(\"generate_bin\",\n help=\"create PKI bin\")\n generate_bin_parser.add_argument(\"--bin_file\", \"-b\", default=\"PKI.bin\",\n help=\"bin_file which need to be parsed\")\n generate_bin_parser.add_argument('pki_list', metavar='<type> <file>',\n help='type (ca, cert, key,)'\n ' and file, separated by space',\n action=PKIPairAction)\n\n args = parser.parse_args()\n return args" ]
[ "0.64982295", "0.6270005", "0.60605526", "0.6001975", "0.5956914", "0.5867535", "0.586068", "0.5848655", "0.58337194", "0.57882416", "0.5759382", "0.57521", "0.5736829", "0.57335955", "0.572861", "0.571756", "0.5702735", "0.5665511", "0.5653056", "0.5628118", "0.5623889", "0.5611042", "0.56103534", "0.5593993", "0.55859137", "0.55763775", "0.5568918", "0.55402416", "0.5539436", "0.5535271", "0.5521495", "0.5509131", "0.55070263", "0.5502912", "0.5484665", "0.54834014", "0.54788554", "0.54653037", "0.5463399", "0.54513675", "0.54289895", "0.5417465", "0.5404531", "0.53824455", "0.5381075", "0.5379999", "0.5362969", "0.5361187", "0.53569263", "0.5352304", "0.5347268", "0.5343009", "0.5342304", "0.53421557", "0.53416383", "0.5336987", "0.5336347", "0.53335106", "0.53306705", "0.5324339", "0.53235793", "0.5322079", "0.53195554", "0.53169906", "0.53165925", "0.5313345", "0.53034234", "0.53018", "0.5301575", "0.52930486", "0.52926296", "0.5290471", "0.5289555", "0.52878064", "0.52856934", "0.52742636", "0.52680415", "0.52643377", "0.5264297", "0.5264236", "0.5259546", "0.52513313", "0.52483636", "0.52449304", "0.5242343", "0.52403474", "0.52332455", "0.5231144", "0.522923", "0.5227782", "0.5227782", "0.5227782", "0.5227782", "0.5225047", "0.5221539", "0.5221148", "0.52188224", "0.52160966", "0.5211424", "0.5211224" ]
0.7327621
0
Return a usable Boto object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output)
def get_boto(args=None, logger=None, stats=None):
    return Boto(**__get_arguments(args, logger, stats))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_boto3(args=None, logger=None, stats=None):\n return Boto3(**__get_arguments(args, logger, stats))", "def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_boto_cli_arguments().\n # We only need those arguments to create Boto object, nothing else.\n # parse_known_args() return (Namespace, list of unknown arguments),\n # we only care about the Namespace object here.\n args = parser.parse_known_args()[0]\n\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n return {\n 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),\n 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),\n 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),\n 'region': getattr(args, 'boto_region', DEFAULT['region']()),\n 'logger': logger,\n 'stats': stats,\n }", "def get_elb(args=None, logger=None, stats=None):\n if not args:\n parser = get_parser()\n add_elb_cli_arguments(parser)\n args = parser.parse_args()\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n boto = Boto3(\n log_level=args.boto_log_level,\n access_key=args.boto_access_key,\n secret_key=args.boto_secret_key,\n region=args.boto_region,\n logger=logger,\n stats=stats,\n )\n return ELB(\n boto=boto,\n logger=logger,\n stats=stats,\n )", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def aws_cli(args: List[str]):\n\n try:\n text_output = subprocess.check_output(['aws'] + args, text=True)\n except subprocess.CalledProcessError as e:\n raise Exception(f\"failed to call AWS CLI ({e.returncode}): \\n{e.stdout}\\n\\n{e.stderr}\") from e\n\n try:\n json_obj = json.loads(text_output)\n except json.JSONDecodeError as e:\n raise Exception(f\"AWS CLI did not output JSON as expected ({e.msg}). 
Output was:\\n{text_output}\") from e\n\n return json_obj", "def get_boto_client(self) -> S3Client:\n if self._boto_client is None:\n config = Config(signature_version=botocore.UNSIGNED)\n self._boto_client = self.session.client(\n \"s3\",\n region_name=settings.S3_REGION,\n endpoint_url=settings.S3_ENDPOINT_URL,\n config=config,\n )\n return self._boto_client", "def main():\n\n parser = get_args()\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.INFO)\n LOG.info('Verbose: on')\n else:\n ## If not verbose, turn down boto3.\n boto3.set_stream_logger(name='boto3', level=logging.WARNING)\n boto3.set_stream_logger(name='botocore', level=logging.WARNING)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n ## Ensure credentials.\n if not args.credentials:\n die_screaming('need a credentials argument')\n LOG.info('Will use credentials: ' + args.credentials)\n ## Ensure directory.\n if not args.directory:\n die_screaming('need a directory argument')\n args.directory = args.directory.rstrip('//')\n LOG.info('Will operate in: ' + args.directory)\n ## Ensure bucket.\n if not args.bucket:\n die_screaming('need a bucket argument')\n bucket, slash, toppath = args.bucket.partition('/')\n if toppath != '':\n LOG.info('Will put to bucket: ' + bucket + '; with path: ' + toppath)\n else:\n LOG.info('Will put to bucket at top level: ' + bucket)\n ## Ensure mimetype metadata.\n if not args.mimetypes:\n LOG.info('Will use internal mimetype defaults')\n else:\n LOG.info('TODO: Will get mimetype metadata from: ' + args.metadata)\n ## Ensure bucket location.\n if not args.location:\n args.location = 'us-east-1'\n LOG.info('Will use S3 bucket location default: ' + args.location)\n else:\n LOG.info('Will use S3 bucket location: ' + args.location)\n\n ## Extract S3 credentials.\n creds = None\n with open(args.credentials) as chandle:\n creds = json.loads(chandle.read())\n #LOG.info(creds)\n\n s3 = boto3.resource('s3', region_name=args.location,\n aws_access_key_id=creds['accessKeyId'],\n aws_secret_access_key=creds['secretAccessKey'])\n\n # s3 = boto3.resource(\"s3\", creds['accessKeyId'], creds['secretAccessKey'])\n\n #s3.Object('mybucket', 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))\n\n ## Walk tree.\n for curr_dir, dirs, files in os.walk(args.directory):\n\n ## We can navigate up if we are not in the root.\n relative_to_start = curr_dir.rstrip('//')[len(args.directory):]\n relative_to_start = relative_to_start.lstrip('//')\n LOG.info('curr_dir: ' + curr_dir + ' (' + relative_to_start + ')')\n\n ## Note files and directories.\n for fname in files:\n\n ## Get correct mime type.\n fext = os.path.splitext(fname)[1].lstrip('.')\n mime = MIMES.get('') # start with default\n if MIMES.get(fext, False):\n mime = MIMES.get(fext)\n\n ## Figure out S3 path/key and final filename, keeping in\n ## mind that relative_to_Start can be empty if root.\n s3path = fname\n if relative_to_start:\n s3path = relative_to_start + '/' + fname\n filename = os.path.join(curr_dir, fname)\n\n tags = {}\n if args.number:\n tags['build-number'] = args.number\n if args.pipeline:\n tags['build-pipeline'] = args.pipeline\n tags_str = urllib.parse.urlencode(tags)\n\n ## Visual check.\n LOG.info('file: ' + filename)\n if toppath != '':\n s3path = toppath + '/' + s3path\n LOG.info(' -> [' + bucket + '] ' + s3path + \\\n '(' + mime + ', ' + tags_str + ')')\n\n ## Create the new object that we want.\n s3bucket = s3.Bucket(bucket)\n multipart_upload(filename, s3bucket, s3path, content_type=mime, metadata=tags, 
policy=\"public-read\")\n\n # newobj = s3.Object(args.bucket, s3path)\n # outfile = open(filename, 'rb')\n # newobj.put(Body=outfile, \\\n # ContentType=mime, \\\n # Metadata=tags,\n # ACL='public-read') #Tagging=tags_str)\n\n # outbod = open(os.path.join(curr_dir, fname), 'rb')\n # .put(Body=outbod, 'rb')\n\n # for dname in dirs:\n # #LOG.info('dir: ' + os.path.join(curr_dir, dname))\n # pass", "def __init__(\n self,\n service_name: str,\n account_id: str,\n region_name: Optional[str] = None,\n aws_creds: Optional[Dict[str, str]] = None,\n profile_name: Optional[str] = None,\n placebo: Optional[Any] = None,\n placebo_data_path: Optional[str] = None,\n placebo_mode: Optional[str] = \"record\",\n max_attempts: int = 20,\n config: Optional[Config] = None,\n max_attempts_on_client_error: int = 10,\n ):\n self._service_name = service_name\n self._region_name = region_name\n self._account_id = account_id\n self._max_attempts_on_client_error = max_attempts_on_client_error\n\n # Build a clojure in order to recreate boto3 client if needed\n\n def _create_client(service: str = None):\n return get_client(\n session=get_session(\n aws_creds=aws_creds,\n profile_name=profile_name,\n placebo=placebo,\n placebo_data_path=placebo_data_path,\n placebo_mode=placebo_mode,\n ),\n service_name=service if service else service_name,\n region_name=region_name,\n max_attempts=max_attempts,\n config=config,\n )\n\n # set client factory\n self.create_client = _create_client\n\n # Build boto3 client\n self._client = self.create_client()", "def _aws_get_object(bucket, key, request_pays=True, client=None):\n if not client:\n session = boto3_session(region_name=REGION)\n client = session.client(\"s3\")\n\n params = {\"Bucket\": bucket, \"Key\": key}\n if request_pays:\n params[\"RequestPayer\"] = \"requester\"\n response = client.get_object(**params)\n return response[\"Body\"].read()", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def __init__(self):\n self.aws = AWS()", "def bcbio_s3_instance_profile(conn, args):\n import boto\n if hasattr(args, \"nocreate\") and args.nocreate:\n return {\"instance_profile\": \"\"}\n base_name = args.cluster if hasattr(args, \"cluster\") and args.cluster else \"bcbio\"\n name = \"%s_full_s3_access\" % (base_name)\n try:\n ip = conn.get_instance_profile(name)\n except boto.exception.BotoServerError:\n print(\"Instance profile %s doesn't exist, creating\" % name)\n ip = conn.create_instance_profile(name)\n try:\n conn.get_role(name)\n except boto.exception.BotoServerError:\n print(\"Role %s doesn't exist, creating\" % name)\n conn.create_role(name)\n conn.put_role_policy(name, name, S3_POLICY)\n if not tz.get_in([\"get_instance_profile_response\", \"get_instance_profile_result\", \"instance_profile\", \"roles\"],\n ip):\n conn.add_role_to_instance_profile(name, name)\n print(\"Instance profile: %s\" % name)\n return {\"instance_profile\": name}", "def xray_botocore_api_call(wrapped, instance, args, kwargs):\n return generic_xray_wrapper(\n wrapped,\n instance,\n args,\n kwargs,\n name=get_service_name,\n namespace=\"aws\",\n metadata_extractor=extract_aws_metadata,\n error_handling_type=ERROR_HANDLING_BOTOCORE,\n )", "def extract_aws_metadata(wrapped, instance, args, kwargs, return_value):\n response = return_value\n LOGGER.debug(\n \"Extracting AWS metadata\", args=args, kwargs=kwargs,\n )\n if \"operation_name\" in kwargs:\n operation_name = 
kwargs[\"operation_name\"]\n else:\n operation_name = args[0]\n\n # Most of the time the actual keyword arguments to the client call are\n # passed in as a positial argument after the operation name.\n if len(kwargs) == 0 and len(args) == 2:\n kwargs = args[1]\n\n region_name = instance._client_config.region_name\n\n response_metadata = response.get(\"ResponseMetadata\")\n\n metadata = {\"aws\": {\"operation\": operation_name, \"region\": region_name}}\n\n if \"TableName\" in kwargs:\n metadata[\"aws\"][\"table_name\"] = kwargs[\"TableName\"]\n if \"QueueUrl\" in kwargs:\n metadata[\"aws\"][\"queue_url\"] = kwargs[\"QueueUrl\"]\n\n if response_metadata is not None:\n metadata[\"http\"] = {\n \"response\": {\"status\": response_metadata[\"HTTPStatusCode\"]},\n }\n metadata[\"aws\"][\"request_id\"] = response_metadata[\"RequestId\"]\n\n return metadata", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_S3_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('s3', endpoint_url=endpoint_url)\n return _client", "def __init__(self):\n super(AWSBase, self).__init__()\n self.region = config.ENV_DATA['region']\n self.aws = AWSUtil(self.region)", "def botoconn(args):\n try:\n return boto.ec2.autoscale.connect_to_region(args.region)\n except:\n print(\"FATAL ERROR:\")\n traceback.print_exc(file=sys.stdout)\n sys.exit(\"Failed to connect to AWS. Did you set the shell vars right?\")", "def make_sdk(options=None, **kwargs):\n from openstack import connection\n cloud = get_config(options=options, **kwargs)\n return connection.from_config(cloud_config=cloud, options=options)", "def aws(ctx): # pylint: disable=unused-argument\n pass # pylint: disable=unnecessary-pass", "def __init__(self, bucket, aws_profile=None, logger=None):\n self.bucket = bucket\n self.s3helper = S3Helper(aws_profile=aws_profile)\n self.print_func = print\n if logger:\n self.print_func = logger.info", "def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))", "def aws():\n pass", "def main():\n t0 = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--env', default='LOCAL', help='Enter one of DOCKER, LOCAL or S3')\n parser.add_argument('--bucket-name', help='Enter S3 bucket')\n parser.add_argument('--aws-access-key-id', help='Enter AWS access key id')\n parser.add_argument('--aws-secret-access-key', help='Enter AWS secrest access key')\n parser.add_argument('--aws-region', default='us-west-2', help='Enter AWS region')\n # subparser = parser.add_subparsers(dest='subcommand', help='Can choose bucket name if S3 is chosen')\n # parser_bucket = subparser.add_parser('S3')\n # parser_bucket.add_argument('bucket', help='S3 bucket name')\n args = vars(parser.parse_args())\n args['env'] = args['env'].upper()\n if args['env'] != 'S3' and args['bucket_name']:\n parser.error('Can specify a bucket name with only S3...')\n if args['env'] == 'S3' and not (args['bucket_name'] and \n args['aws_access_key_id'] and\n args['aws_secret_access_key']):\n parser.error('Specify a bucket, access key and secret access key...')\n # print(args)\n # print(args['env'])\n # print(args['subcommand'])\n\n if args['env'] == 'S3' and args['aws_region'] != '':\n s3_client = create_client(\n \"s3\",\n region=args['aws_region'],\n access_key_id=args['aws_access_key_id'],\n 
secret_access_key=args['aws_secret_access_key']\n )\n os.environ['AWS_ACCESS_KEY_ID'] = args['aws_access_key_id'].strip()\n os.environ['AWS_SECRET_ACCESS_KEY'] = args['aws_secret_access_key'].strip()\n logger.info('Check to see whether s3 bucket exits...')\n try:\n s3.meta.client.head_bucket(Bucket=args['bucket_name'])\n logger.info(f\"S3 bucket {args['bucket_name']} exits...\")\n except Exception as e:\n logger.warn(f\"Bucket {args['bucket_name']} doesn't exist...\")\n logger.info('Creating bucket...')\n create_s3_bucket(s3_client, args['bucket_name'], args['aws_region'])\n\n\n config = configparser.ConfigParser()\n if args['env'] == 'DOCKER':\n CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n elif args['env'] == 'S3':\n obj = s3_client.get_object(Bucket=args['bucket_name'], Key='config/etl_config.cfg')\n try:\n config.read_string(obj['Body'].read().decode())\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n else:\n CFG_FILE = r'/Users/home/Documents/dend/Data-Engineering-ND/Capstone/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n\n sas_jar_ver = config['APP']['sas_jar_ver']\n os.environ['SAS_JAR'] = \".\".join(sas_jar_ver.split('.')[:-1])\n\n if args['env'] == 'DOCKER':\n base_dir = config['DOCKER']['base_dir']\n data_dir = config['DOCKER']['data_dir']\n path = config['DOCKER']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['DOCKER']['dict_dir']\n files = json.loads(config['DOCKER']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['DOCKER']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['DOCKER']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['DOCKER']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['DOCKER']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n elif args['env'] == 'S3':\n bucket = args['bucket_name']\n path = config['S3']['s3_sas_key']\n dict_dir = config['S3']['s3_dict_key']\n csv_dir = config['S3']['s3_csv_key']\n sas_file_path = os.path.join(\"s3a://\", bucket, csv_dir, path)\n files = json.loads(config['S3']['input_files'])\n airport_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['airports_file'])\n demographic_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['us_demographics_file'])\n dictionary_file = os.path.join(\"s3a://\", bucket, config['S3']['dictionary_file'])\n output_dir = os.path.join(\"s3a://\", bucket, config['S3']['output_dir'])\n else:\n base_dir = config['LOCAL']['base_dir']\n data_dir = config['LOCAL']['data_dir']\n path = config['LOCAL']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['LOCAL']['dict_dir']\n files = json.loads(config['LOCAL']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['LOCAL']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['LOCAL']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['LOCAL']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['LOCAL']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = 
config['LOCAL']['log_file']\n \n try:\n # Log file written to Hadoop EMR env\n base_dir = config['HADOOP']['base_dir']\n log_dir = os.path.join(base_dir, config['HADOOP']['log_dir'])\n log_file = config['HADOOP']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n except:\n base_dir = config['LOCAL']['base_dir']\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n\n\n logger.info('ETL parsing has started...')\n logger.info(\"Create output dir if it doesn't exist...\")\n if args['env'] != 'S3':\n pathlib.Path(output_dir).mkdir(exist_ok=True)\n else:\n # config.set('S3', 's3_bucket_name', args['bucket_name'])\n # s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['config_dir'], Body=)\n s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['output_dir'])\n logger.info('Created S3 bucket...')\n \n spark = create_spark_session()\n logger.info('Pyspark session created...')\n logger.info('Register UDFs...')\n \n spark.udf.register('SASDateConverter', sas_date_converter, Date())\n logger.info('Register sas_date_converter UDF...')\n\n # change_date_format_1 = F.udf(lambda x: datetime.strptime(x.strip(), '%Y%m%d'), Date())\n # change_date_format_2 = F.udf(lambda x: datetime.strptime(x.strip(), '%m%d%Y'), Date())\n dt = F.udf(change_date_format, Date())\n\n logger.info('Read and concatenate the raw SAS files...')\n dfs = []\n for file in files:\n try:\n df = spark.read.format('com.github.saurfang.sas.spark')\\\n .load(os.path.join(sas_file_path, file))\n dfs.append(df)\n except Exception as e:\n logger.info(f'File {file} is not available. 
Skipping...')\n logger.info(f'Read {len(files)} files successfully...')\n df = []\n if len(dfs) > 0:\n df = concat_df(*dfs)\n logger.info(f'Successfully concatenated {len(files)}...')\n if not isinstance(df, list):\n # SAS raw data table creation begins here\n cols = ['cicid', 'i94yr', 'i94mon', 'i94port', 'i94mode', 'visapost', \n 'entdepa', 'entdepd', 'entdepu', 'matflag', \n 'dtadfile', 'dtaddto']\n parquet_tables = ['i94_immigrations', 'i94_trips', 'i94_visitors', 'i94_flights']\n f_transforms = [i94_immigrations, i94_trips, i94_visitors, i94_flights]\n res_df = None\n for table, f_transform in zip(parquet_tables, f_transforms):\n if table == 'i94_immigrations':\n # only table not using spark sql\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=None, cols=cols,\n udf=dt, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n elif table == 'i94_flights':\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='csv',\n is_partition=False,\n is_overwrite=True,\n crate_date_df=False)\n else:\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n if table == 'i94_trips':\n table = 'i94_dates'\n create_and_write_df(res_df, table, i94_dates, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n # Reference data for airports and us city demographics begins here\n airport_df = spark.createDataFrame([], R([]))\n demographic_df = spark.createDataFrame([], R([]))\n logger.info('Read the airports reference file...')\n try:\n airport_df = spark.read.option('header', True) \\\n .csv(airport_file)\n except Exception as e:\n logger.error(f'File {airport_file} is not available. Skipping...')\n\n logger.info('Read the US demographics reference file...')\n try:\n demographic_df = spark.read.options(header='True', delimiter=';') \\\n .csv(demographic_file) \n except Exception as e:\n logger.error(f'File {demographic_file} is not available. 
Skipping...')\n if airport_df.count() > 0 and demographic_df.count() > 0: \n csv_tables = ['i94_airports', 'i94_us_states_demographic', \n 'i94_us_cities_demographic']\n f_transforms = [i94_airports, i94_us_states_demographic, i94_us_cities_demographic]\n csv_dfs = [airport_df, demographic_df, demographic_df]\n for table, f_transform, df in zip(csv_tables, f_transforms, csv_dfs):\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=dt, fmt='csv',\n is_partition=False,\n is_overwrite=True)\n\n # SAS reference data creation begins here\n ref_csv_tables = ['i94_countries', 'i94_port_state_mapping', 'i94_travel_mode', \n 'i94_state_mapping', 'i94_visa']\n table_pos_dict = {\n 'i94_countries': [2, 3, 'country', 'country_id'],\n 'i94_port_state_mapping': [3, 4, 'city', 'i94_port'],\n 'i94_travel_mode': [4, 5, 'mode', 'mode_id'],\n 'i94_state_mapping': [5, 6, 'state', 'state_id'],\n 'i94_visa': [6, 7, 'visa_purpose', 'visa_id']\n }\n logger.info('Read the SAS data dictionary reference file...') \n for table in ref_csv_tables:\n create_and_write_ref_df(dictionary_file, table, output_dir, spark, \n fmt='csv', start_pos=table_pos_dict[table][0], \n end_pos=table_pos_dict[table][1],\n col_name=table_pos_dict[table][2], \n index_name=table_pos_dict[table][3],\n is_partition=False,\n is_overwrite=True)\n\n logger.info('ETL parsing has completed...')\n logger.info('Time taken to complete job {} minutes'.format((time.time() - t0) / 60))", "def mock_amazon():\n amazon = Amazon()\n amazon.carrot1 = 'cenoura normal'\n amazon.carrot2 = 'cenoura radioativa'\n amazon.carrot_number = 575\n return amazon", "def __init__(__self__, resource_name, opts=None, aws_kms_key_arn=None, content_config=None, content_config_permissions=None, input_bucket=None, name=None, notifications=None, output_bucket=None, role=None, thumbnail_config=None, thumbnail_config_permissions=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['aws_kms_key_arn'] = aws_kms_key_arn\n __props__['content_config'] = content_config\n __props__['content_config_permissions'] = content_config_permissions\n if input_bucket is None:\n raise TypeError(\"Missing required property 'input_bucket'\")\n __props__['input_bucket'] = input_bucket\n __props__['name'] = name\n __props__['notifications'] = notifications\n __props__['output_bucket'] = output_bucket\n if role is None:\n raise TypeError(\"Missing required property 'role'\")\n __props__['role'] = role\n __props__['thumbnail_config'] = thumbnail_config\n __props__['thumbnail_config_permissions'] = thumbnail_config_permissions\n __props__['arn'] = None\n super(Pipeline, __self__).__init__(\n 'aws:elastictranscoder/pipeline:Pipeline',\n resource_name,\n __props__,\n opts)", "def cli(profile, region, clear):\n global SESSION, 
BUCKET_MANAGER, DOMAIN_MANAGER, CERT_MANAGER, \\\n DIST_MANAGER, EC2_MANAGER, ECS_MANAGER\n session_cfg = {}\n if profile:\n session_cfg['profile_name'] = profile\n\n if region:\n session_cfg['region_name'] = region\n\n if clear:\n util.clear_scr()\n\n# using **<variable> python expands it as a parameter=content\n SESSION = boto3.Session(**session_cfg)\n BUCKET_MANAGER = BucketManager(SESSION)\n DOMAIN_MANAGER = DomainManager(SESSION)\n CERT_MANAGER = CertificateManager(SESSION)\n DIST_MANAGER = DistributionManager(SESSION)\n EC2_MANAGER = EC2Manager(SESSION)\n ECS_MANAGER = ECSManager(SESSION)", "def boto_client(account_id, service_name, region):\n logger.info('Creating boto3 client for account_id: {}, '\n 'service_name: {}'.format(account_id, service_name))\n return boto3.client(service_name, region_name=region)", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def default_glacier_wrapper(args, **kwargs):\n return GlacierWrapper(args.aws_access_key,\n args.aws_secret_key,\n args.region,\n bookkeeping=args.bookkeeping,\n no_bookkeeping=args.no_bookkeeping,\n bookkeeping_domain_name=args.bookkeeping_domain_name,\n sdb_access_key=args.sdb_access_key,\n sdb_secret_key=args.sdb_secret_key,\n sdb_region=args.sdb_region,\n # sns_enable=args.sns_enable,\n # sns_topic=args.sns_topic,\n # sns_monitored_vaults=args.sns_monitored_vaults,\n # sns_options=args.sns_options,\n # config_object=args.config_object,\n logfile=args.logfile,\n loglevel=args.loglevel,\n logtostdout=args.logtostdout)", "def _get_client(\n session: Optional[boto3.Session] = None, region: Optional[str] = None\n) -> S3Client:\n return session.client(\"s3\") if session else boto3.client(\"s3\", region_name=region)", "def get_client(access_key, secret_key, region='eu-west-1', service='ec2'):\n return boto3.client(\n service,\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n region_name=region\n )", "def main():\n\n args = parser_args()\n exit_code = 0\n con = AWSConnect()\n\n con.delete_unused(args.noop)\n\n return exit_code", "def create_boto_session(account):\n aws_access_key_id = account['aws_access_key_id']\n aws_secret_access_key = account['aws_secret_access_key']\n region = account['region']\n #aws_profile = account['aws_profile']\n\n\n session = boto3.Session(\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n region_name=region,\n #profile_name=aws_profile,\n )\n\n return session", "def mock_s3_boto_returns() -> callable:\n\n def client(aws_res, aws_access_key_id=None, aws_secret_access_key=None):\n return BotoMockReturns()\n\n return client", "def __init__(__self__,\n resource_name: str,\n args: ObjectStorageKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: Optional[InstanceArgs] = None,\n opts: 
Optional[pulumi.ResourceOptions] = None):\n ...", "def cli(obj, *, default_type=None):\n if inspect.isfunction(obj):\n return function2cli(obj, default_type=default_type)\n else:\n return obj2cli(obj, default_type=default_type)", "def get_object_tagging(Bucket=None, Key=None, VersionId=None):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"add_on\"] = add_on\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"blueprint_id\"] = blueprint_id\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"cpu_count\"] = cpu_count\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"ip_address_type\"] = ip_address_type\n __props__.__dict__[\"ipv6_address\"] = ipv6_address\n __props__.__dict__[\"ipv6_addresses\"] = ipv6_addresses\n __props__.__dict__[\"is_static_ip\"] = is_static_ip\n __props__.__dict__[\"key_pair_name\"] = key_pair_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"public_ip_address\"] = public_ip_address\n __props__.__dict__[\"ram_size\"] = ram_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_data\"] = user_data\n __props__.__dict__[\"username\"] = username\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get_s3_client(args: argparse.Namespace) -> botocore.clients.s3:\n\n assert args.s3_region_name is not None, \"set COMPSYN_S3_REGION_NAME\"\n assert args.s3_access_key_id is not None, \"set COMPSYN_S3_ACCESS_KEY_ID\"\n assert args.s3_secret_access_key is not None, \"set COMPSYN_S3_SECRET_ACCESS_KEY\"\n assert args.s3_bucket is not None, \"set COMPSYN_S3_BUCKET\"\n\n return boto3.session.Session().client(\n \"s3\",\n region_name=args.s3_region_name,\n endpoint_url=args.s3_endpoint_url,\n aws_access_key_id=args.s3_access_key_id,\n aws_secret_access_key=args.s3_secret_access_key,\n )", "def __init__(__self__,\n resource_name: str,\n args: BucketACLArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, resource_name, opts=None, 
allocated_capacity=None, command=None, connections=None, default_arguments=None, description=None, execution_property=None, glue_version=None, max_capacity=None, max_retries=None, name=None, number_of_workers=None, role_arn=None, security_configuration=None, tags=None, timeout=None, worker_type=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['allocated_capacity'] = allocated_capacity\n if command is None:\n raise TypeError(\"Missing required property 'command'\")\n __props__['command'] = command\n __props__['connections'] = connections\n __props__['default_arguments'] = default_arguments\n __props__['description'] = description\n __props__['execution_property'] = execution_property\n __props__['glue_version'] = glue_version\n __props__['max_capacity'] = max_capacity\n __props__['max_retries'] = max_retries\n __props__['name'] = name\n __props__['number_of_workers'] = number_of_workers\n if role_arn is None:\n raise TypeError(\"Missing required property 'role_arn'\")\n __props__['role_arn'] = role_arn\n __props__['security_configuration'] = security_configuration\n __props__['tags'] = tags\n __props__['timeout'] = timeout\n __props__['worker_type'] = worker_type\n __props__['arn'] = None\n super(Job, __self__).__init__(\n 'aws:glue/job:Job',\n resource_name,\n __props__,\n opts)", "def __init__(self, looking_for_tags: dict):\n self.looking_for_tags = looking_for_tags\n self.ec2 = boto3.resource('ec2')", "def get_bucket(bucket):\n if isinstance(bucket, Bucket):\n return bucket\n if isinstance(bucket, str):\n return setup_bucket(bucket)\n else:\n raise TypeError(\"Expected bucket to be Bucket or str was %s \" % type(bucket))", "def __init__(self):\n self.compute = AwsCompute()\n Menu('Amazon Web Services (AWS) Compute operations', [\n MenuEntry('Go back', None),\n MenuEntry('List all the instances', self.list_instances),\n MenuEntry('Stop all the instances', self.stop_all_instances),\n MenuEntry('List running instances', self.list_running_instances),\n MenuEntry('Detail a running instance', self.detail_running_instance),\n MenuEntry('Start a specific instance', self.start_instance),\n MenuEntry('Stop a specific instance', self.stop_instance),\n MenuEntry('Start a new instance given an AMI', self.create_instance_by_image),\n MenuEntry('Start a new instance given the Operating System', self.create_instance_by_os),\n MenuEntry('List volumes', self.list_volumes),\n MenuEntry('Attach a volume', self.attach_volume),\n MenuEntry('Detach a volume', self.detach_volume),\n ]).run()", "def parse_cli(passed_args=None):\n if passed_args is None:\n passed_args = sys.argv[1:]\n parser = argparse.ArgumentParser(description=\"Clone a Launch Configuration.\"\n \" Note: Any options passed will override\"\n \" the cloned LCs settings, rather than\"\n \" append to them.\"\n \" Note2: 
Requires shell variables to be\"\n \" set: AWS_DEFAULT_REGION,\"\n \" AWS_ACCESS_KEY_ID, and\"\n \" AWS_SECRET_ACCESS_KEY\"\n )\n parser.add_argument(\"old_lc_name\", action='store',\n help=\"The name of the LC to clone.\")\n parser.add_argument(\"new_lc_name\", action='store',\n help=\"The new LC name.\")\n parser.add_argument(\"--ami\", action='store',\n dest='image_id', default=None,\n help=\"The AMI ID for the launch config\")\n parser.add_argument(\"--ssh-key\", action='store',\n dest='key_name', default=None,\n help=\"The SSH for the launch config\")\n parser.add_argument(\"--security-group\", action='append',\n dest='security_groups',\n help=\"The (or one of the) security group for the LC.\"\n \" May be specified multiple times\")\n parser.add_argument(\"--user-data-script\", action='store',\n type=parse_user_data, default=None, dest='user_data',\n help=\"The file containing the user data script\")\n parser.add_argument(\"--instance-type\", action='store',\n default=None,\n help=\"The instance type\")\n parser.add_argument(\"--enable-instance-monitoring\", action='store_const',\n const=True, default=None, dest='instance_monitoring',\n help=\"enable instance-monitoring\")\n parser.add_argument(\"--disable-instance-monitoring\", action='store_const',\n const=False, default=None, dest='instance_monitoring',\n help=\"enable instance-monitoring\")\n parser.add_argument(\"--spot-price\", action='store',\n type=float, default=None,\n help=\"The spot price\")\n parser.add_argument(\"--instance-profile-name\", action='store',\n default=None,\n help=\"The name (or ARN) of the instance profile for\"\n \" these instances\")\n parser.add_argument(\"--enable-ebs-optimized\", action='store_const',\n const=True, default=None, dest='ebs_optimized',\n help=\"enable ebs optimized\")\n parser.add_argument(\"--disable-ebs-optimized\", action='store_const',\n const=False, default=None, dest='ebs_optimized',\n help=\"enable ebs optimized\")\n parser.add_argument(\"--enable-associate-public-ip-address\", action='store_const',\n const=True, default=None, dest='associate_public_ip_address',\n help=\"enable association of public ip addresses\")\n parser.add_argument(\"--disable-associate-public-ip-address\", action='store_const',\n const=False, default=None, dest='associate_public_ip_address',\n help=\"enable association of public ip addresses\")\n parser.add_argument(\"--region\", action='store',\n default=os.environ['AWS_DEFAULT_REGION'],\n help=\"region within which to clone\")\n args = parser.parse_args(passed_args)\n return args", "def __init__(__self__,\n resource_name: str,\n args: ReplicatedBucketArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def get_boto_ses_connection():\n access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)\n access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)\n api_endpoint = getattr(settings, 'AWS_SES_API_HOST',\n SESConnection.DefaultHost)\n\n return SESConnection(\n aws_access_key_id=access_key_id,\n aws_secret_access_key=access_key,\n host=api_endpoint,\n )", "def __init__(__self__, resource_name, opts=None, block_device_mappings=None, capacity_reservation_specification=None, credit_specification=None, description=None, disable_api_termination=None, ebs_optimized=None, elastic_gpu_specifications=None, elastic_inference_accelerator=None, iam_instance_profile=None, image_id=None, instance_initiated_shutdown_behavior=None, instance_market_options=None, instance_type=None, kernel_id=None, key_name=None, license_specifications=None, 
monitoring=None, name=None, name_prefix=None, network_interfaces=None, placement=None, ram_disk_id=None, security_group_names=None, tag_specifications=None, tags=None, user_data=None, vpc_security_group_ids=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['block_device_mappings'] = block_device_mappings\n __props__['capacity_reservation_specification'] = capacity_reservation_specification\n __props__['credit_specification'] = credit_specification\n __props__['description'] = description\n __props__['disable_api_termination'] = disable_api_termination\n __props__['ebs_optimized'] = ebs_optimized\n __props__['elastic_gpu_specifications'] = elastic_gpu_specifications\n __props__['elastic_inference_accelerator'] = elastic_inference_accelerator\n __props__['iam_instance_profile'] = iam_instance_profile\n __props__['image_id'] = image_id\n __props__['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior\n __props__['instance_market_options'] = instance_market_options\n __props__['instance_type'] = instance_type\n __props__['kernel_id'] = kernel_id\n __props__['key_name'] = key_name\n __props__['license_specifications'] = license_specifications\n __props__['monitoring'] = monitoring\n __props__['name'] = name\n __props__['name_prefix'] = name_prefix\n __props__['network_interfaces'] = network_interfaces\n __props__['placement'] = placement\n __props__['ram_disk_id'] = ram_disk_id\n __props__['security_group_names'] = security_group_names\n __props__['tag_specifications'] = tag_specifications\n __props__['tags'] = tags\n __props__['user_data'] = user_data\n __props__['vpc_security_group_ids'] = vpc_security_group_ids\n __props__['arn'] = None\n __props__['default_version'] = None\n __props__['latest_version'] = None\n super(LaunchTemplate, __self__).__init__(\n 'aws:ec2/launchTemplate:LaunchTemplate',\n resource_name,\n __props__,\n opts)", "def get_boto3_client(\n *,\n aws_lambda_mode: bool,\n service_name: str,\n profile_name: str = 'kreodont',\n connect_timeout: float = 0.2,\n read_timeout: float = 0.4,\n) -> Optional[boto3.client]:\n known_services = ['translate', 'dynamodb', 's3']\n if service_name in global_cached_boto3_clients:\n print(f'{service_name} client taken from cache!')\n return global_cached_boto3_clients[service_name]\n\n if service_name not in known_services:\n raise Exception(\n f'Not known service '\n f'name {service_name}. 
The following '\n f'service names known: {\", \".join(known_services)}')\n\n if aws_lambda_mode:\n client = boto3.client(\n service_name,\n config=botocore.client.Config(\n connect_timeout=connect_timeout,\n read_timeout=read_timeout,\n parameter_validation=False,\n retries={'max_attempts': 0},\n ),\n )\n else:\n client = boto3.Session(profile_name=profile_name).client(service_name)\n return client\n\n # saving to cache to to spend time to create it next time\n global_cached_boto3_clients[service_name] = client\n return client", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def _get_buckets():\n\n return __opts__[\"s3.buckets\"] if \"s3.buckets\" in __opts__ else {}", "def parse_args():\n logging.getLogger().debug(\"parse_args()\")\n parser = argparse.ArgumentParser(description='AWS instance health')\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='verbosity level, specify multiple')\n parser.add_argument('--clusterid', default=\"default\",\n help='clusterid')\n parser.add_argument('--namespace', default=\"default\",\n help='Project namespace')\n args = parser.parse_args()\n if args.verbose > 0:\n logging.getLogger().setLevel(logging.DEBUG)\n return args", "def __init__(__self__, *,\n add_on: Optional[pulumi.Input['InstanceAddOnArgs']] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n if add_on is not None:\n pulumi.set(__self__, \"add_on\", add_on)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if blueprint_id is not None:\n pulumi.set(__self__, \"blueprint_id\", blueprint_id)\n if bundle_id is not None:\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if cpu_count is not None:\n pulumi.set(__self__, \"cpu_count\", cpu_count)\n if created_at is not None:\n pulumi.set(__self__, \"created_at\", created_at)\n if ip_address_type is not None:\n pulumi.set(__self__, \"ip_address_type\", 
ip_address_type)\n if ipv6_address is not None:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if ipv6_addresses is not None:\n pulumi.set(__self__, \"ipv6_addresses\", ipv6_addresses)\n if is_static_ip is not None:\n pulumi.set(__self__, \"is_static_ip\", is_static_ip)\n if key_pair_name is not None:\n pulumi.set(__self__, \"key_pair_name\", key_pair_name)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if public_ip_address is not None:\n pulumi.set(__self__, \"public_ip_address\", public_ip_address)\n if ram_size is not None:\n pulumi.set(__self__, \"ram_size\", ram_size)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "def get_s3_args(\n parser: Optional[argparse.ArgumentParser] = None,\n) -> argparse.ArgumentParser:\n\n if parser is None:\n parser = argparse.ArgumentParser()\n\n s3_parser = parser.add_argument_group(\"s3\")\n\n s3_parser.add_argument(\n \"--s3-bucket\",\n type=str,\n action=env_default(\"COMPSYN_S3_BUCKET\"),\n required=False,\n help=\"bucket where img data is stored in S3\",\n )\n s3_parser.add_argument(\n \"--s3-region-name\",\n type=str,\n required=False,\n action=env_default(\"COMPSYN_S3_REGION_NAME\"),\n help=\"S3 region\",\n )\n s3_parser.add_argument(\n \"--s3-endpoint-url\",\n action=env_default(\"COMPSYN_S3_ENDPOINT_URL\"),\n required=False,\n help=\"S3 endpoint URL (only required for non-AWS S3)\",\n )\n s3_parser.add_argument(\n \"--s3-access-key-id\",\n type=str,\n action=env_default(\"COMPSYN_S3_ACCESS_KEY_ID\"),\n required=False,\n )\n s3_parser.add_argument(\n \"--s3-secret-access-key\",\n type=str,\n action=env_default(\"COMPSYN_S3_SECRET_ACCESS_KEY\"),\n required=False,\n )\n\n return parser", "def prepare_instance():\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get install -y python-pip python-setuptools\")\n sudo(\"pip install BeautifulSoup\")\n sudo(\"pip install --upgrade boto\")\n sudo(\"mv /usr/lib/pymodules/python2.6/boto /tmp\")", "def get(resource_name, id, opts=None, allocated_capacity=None, arn=None, command=None, connections=None, default_arguments=None, description=None, execution_property=None, glue_version=None, max_capacity=None, max_retries=None, name=None, number_of_workers=None, role_arn=None, security_configuration=None, tags=None, timeout=None, worker_type=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"allocated_capacity\"] = allocated_capacity\n __props__[\"arn\"] = arn\n __props__[\"command\"] = command\n __props__[\"connections\"] = connections\n __props__[\"default_arguments\"] = default_arguments\n __props__[\"description\"] = description\n __props__[\"execution_property\"] = execution_property\n __props__[\"glue_version\"] = glue_version\n __props__[\"max_capacity\"] = max_capacity\n __props__[\"max_retries\"] = max_retries\n __props__[\"name\"] = name\n __props__[\"number_of_workers\"] = number_of_workers\n __props__[\"role_arn\"] = role_arn\n 
__props__[\"security_configuration\"] = security_configuration\n __props__[\"tags\"] = tags\n __props__[\"timeout\"] = timeout\n __props__[\"worker_type\"] = worker_type\n return Job(resource_name, opts=opts, __props__=__props__)", "def __getattr__(self, attr):\n\n # This way, we don't have to write: rv = Boto().boto.some_call\n # But can just write: rv = Boto().some_call\n # This also gives us hooks for future logging/timers/etc and\n # extended wrapping of things the attributes return if we so\n # choose.\n\n self._logger.debug('Calling wrapped boto attribute: %s on %s', attr, self)\n\n attr = getattr(self._boto, attr)\n\n if callable(attr):\n self._logger.debug(\"Boto attribute '%s' is callable\", attr)\n\n @wraps(attr)\n def wrapper(*args, **kwargs):\n return attr(*args, **kwargs)\n return wrapper\n\n return attr", "def __init__(__self__, resource_name, opts=None, cloudwatch_logging_options=None, code=None, description=None, inputs=None, name=None, outputs=None, reference_data_sources=None, tags=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['cloudwatch_logging_options'] = cloudwatch_logging_options\n __props__['code'] = code\n __props__['description'] = description\n __props__['inputs'] = inputs\n __props__['name'] = name\n __props__['outputs'] = outputs\n __props__['reference_data_sources'] = reference_data_sources\n __props__['tags'] = tags\n __props__['arn'] = None\n __props__['create_timestamp'] = None\n __props__['last_update_timestamp'] = None\n __props__['status'] = None\n __props__['version'] = None\n super(AnalyticsApplication, __self__).__init__(\n 'aws:kinesis/analyticsApplication:AnalyticsApplication',\n resource_name,\n __props__,\n opts)", "def get_object(Bucket=None, IfMatch=None, IfModifiedSince=None, IfNoneMatch=None, IfUnmodifiedSince=None, Key=None, Range=None, ResponseCacheControl=None, ResponseContentDisposition=None, ResponseContentEncoding=None, ResponseContentLanguage=None, ResponseContentType=None, ResponseExpires=None, VersionId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, RequestPayer=None, PartNumber=None):\n pass", "def cli():\n parser=argparse.ArgumentParser(\n description = 'Rotate through a given AWS account for per application keys. Keys are temporarily loaded into environment variables. Asks for a SSO cookie value.')\n parser.add_argument('role', help = 'Role to harvest session keys as')\n parser.add_argument(\n '-c', '--command', help = 'Custom command to run.', default = None)\n parser.add_argument('-a', '--application',\n help = 'Provide a specific application', default = None)\n parser.add_argument(\n '-l', '--list', help = 'Provide a list of applications. 
Lists should be one Application#,Application Name per line', default = None)\n parser.add_argument(\n '-p', '--awspx', help = 'Run awspx across all applications. Install from https://github.com/FSecureLABS/awspx', action=argparse.BooleanOptionalAction, default = False)\n parser.add_argument(\n '-s', '--scoutsuite', help = 'Run ScoutSuite across all applications. Install from https://github.com/nccgroup/ScoutSuite', action=argparse.BooleanOptionalAction, default = False)\n args=parser.parse_args()\n\n print(\"Please provide an SSO cookie value. Obtain from the dev console on a web browser, probably named something like x-amz-sso_authn\")\n token=input()\n\n return args.role, args.list, args.application, args.command, token, args.awspx, args.scoutsuite", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"build\"] = None\n __props__.__dict__[\"config\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"state_message\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def obj2cli(obj, *, default_type=None):\n parser = generate_parser_obj(obj, default_type=default_type)\n\n def inner(argv=None, exit=True):\n import sys\n if argv is None:\n argv = sys.argv[1:]\n\n args = parser.parse_args(argv)\n # Delete subparser identifier\n args.__delattr__('{command}')\n func = args.__getattribute__('{func}')\n args.__delattr__('{func}')\n\n result = apply_namespace(func, args)\n\n if result is not None:\n print(result)\n if exit: sys.exit(0)\n\n return inner", "def __init__(__self__,\n resource_name: str,\n args: InstanceStateArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self):\n self.s3_resource = boto3.resource('s3')\n self.s3_client = boto3.client('s3')", "def __init__(__self__,\n resource_name: str,\n args: Optional[TargetPoolArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def create_default(cls, enable_auth_key=False):\n return cls(\n ops=pwhash.argon2i.OPSLIMIT_SENSITIVE,\n mem=pwhash.argon2i.MEMLIMIT_SENSITIVE,\n construct='argon2i',\n salt_key_enc=utils.random(pwhash.argon2i.SALTBYTES),\n salt_key_sig=utils.random(pwhash.argon2i.SALTBYTES) if enable_auth_key else b'',\n key_size_enc=secret.SecretBox.KEY_SIZE,\n key_size_sig=64 if enable_auth_key else 0\n )", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vpc_region: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n dry_run: Optional[pulumi.Input[bool]] = None,\n load_balancer_id: Optional[pulumi.Input[str]] = None,\n security_group_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def parser() -> any:\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('-p', '--provider', choices=['aws', ], required=True, help='provider')\n arg_parser.add_argument('-t', '--testfile', nargs='+', required=True, help='test 
file')\n arg_parser.add_argument('-d', '--definitions', help='custom definitions path')\n arg_parser.add_argument('-l', '--logfile', help='redirect the output to a log file')\n arg_parser.add_argument('-s', '--silent', help='do not output results', action='store_true', default=False)\n arg_parser.add_argument('-o', '--output', help='save the resource info into the specified directory')\n arg_parser.add_argument('--debug', help='show debug', action='store_true', default=False)\n return arg_parser.parse_args()", "def _cli() -> GooeyParser:\n parser = GooeyParser(description='A template CLI app with optional GUI.')\n\n return parser", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_SNS_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('sns', endpoint_url=endpoint_url)\n return _client", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[pulumi.InputType['ConfigArgs']]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backup_pool: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n failover_ratio: Optional[pulumi.Input[float]] = None,\n health_checks: Optional[pulumi.Input[str]] = None,\n instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n session_affinity: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(self, access_key, secret_key, bucket):\n # amazon s3 connection\n self.s3_conn = None\n self.__set_amazon_s3_service__(access_key, secret_key)\n\n # data bucket to be used\n self.bucket = self.s3_conn.get_bucket(bucket)", "def __init__(self, service, acces_key, secret_key):\n \n self.client = boto3.client(\n service,\n aws_access_key_id=acces_key,\n aws_secret_access_key=secret_key,\n )", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n metadata_endpoint: Optional[pulumi.Input[str]] = None,\n opid: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n use_in_api_documentation: Optional[pulumi.Input[bool]] = None,\n use_in_test_console: 
Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def get_aioboto3_version() -> str:\n try:\n from aioboto3 import __version__ as version # type: ignore\n except ImportError:\n raise RuntimeError(\"aioboto3 is not installed\")\n\n return version", "def instance(template, name, ami, type, keypair, interfaces,\n availability_zone=None, user_data=None, placement_group=None, role='unknown', iam_role=None,\n volume_size=None, tags=None):\n i = Instance(name, template=template)\n i.ImageId = ami\n i.InstanceType = type\n i.KeyName = Ref(keypair)\n\n i.Tags = Tags(Name=aws_name(i.title))\n if role:\n i.Tags += Tags(Role=role)\n\n if tags:\n i.Tags += Tags(**tags)\n\n if iam_role:\n if isinstance(iam_role, str):\n i.IamInstanceProfile = iam_role\n else:\n i.DependsOn = iam_role.title\n i.IamInstanceProfile = Ref(iam_role)\n\n if availability_zone:\n i.AvailabilityZone = availability_zone\n\n if placement_group:\n i.PlacementGroupName = Ref(placement_group)\n\n if volume_size:\n i.BlockDeviceMappings = [\n BlockDeviceMapping(DeviceName=\"/dev/sda1\", Ebs=EBSBlockDevice(VolumeSize=volume_size))\n ]\n\n if interfaces:\n i.NetworkInterfaces = [NetworkInterfaceProperty(DeviceIndex=index,\n NetworkInterfaceId=Ref(interface))\n for (index, interface) in enumerate(interfaces)]\n\n if user_data:\n i.UserData = Base64(Join('', [line + '\\n' for line in user_data.splitlines()]))\n\n return i", "def get_conn():\n global S3Conn\n\n S3Conn = tinys3.Connection(plug.options['aws_access_key'],\n plug.options['aws_secret_key'],\n default_bucket=plug.options['bucket'], tls=True)\n # Check that the given bucket exists by doing a HEAD request\n try:\n S3Conn.head_bucket()\n except requests.HTTPError as httpe:\n err = u\"Cannot reach Onitu bucket {}\".format(plug.options['bucket'])\n if httpe.response.status_code == 404:\n err += u\": The bucket doesn't exist.\"\n if httpe.response.status_code == 403:\n err += u\": Invalid credentials.\"\n err += u\" Please check your Amazon S3 configuration - {}\".format(httpe)\n raise DriverError(err)\n plug.logger.debug(\"Connection with Amazon S3 account successful\")\n return S3Conn", "def run_aws_cli_command(*args: str):\n cli_driver = create_clidriver()\n\n f_stderr = io.StringIO()\n with redirect_stderr(f_stderr):\n return_code = cli_driver.main(args=args)\n\n stderr = f_stderr.getvalue()\n f_stderr.close()\n\n if return_code:\n raise AWSCLIException(args=args, error_code=return_code, stderr=stderr)", "def __init__(__self__,\n resource_name: str,\n args: PrivateCloudArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def get_s3_client():\n return boto3.resource('s3')", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in 
account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def inst(cls):\n if cls.instance is None:\n raise OptionsError(\"No options have been set\")\n return cls.instance", "def get_aiobotocore_version() -> str:\n try:\n from aiobotocore import __version__ as version # type: ignore\n except ImportError:\n raise RuntimeError(\"aiobotocore is not installed\")\n return version", "def main(input_args):\n logging.basicConfig(level=logging.INFO)\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--create-ami', action='store_true',\n default=False,\n help='Creates an AMI instead of deploying an EC2 instance')\n parser.add_argument(\n '--dry-run', action='store_true',\n default=False,\n help='Do not create resources')\n parser.add_argument(\n '--include-apps', action='append',\n default=[],\n help='Assume other apps have already been deployed')\n parser.add_argument(\n '--local-docker', action='store_true',\n default=False,\n help='Start apps using the docker daemon on the machine'\\\n' executing the script')\n parser.add_argument(\n '--skip-create-network', action='store_true',\n default=False,\n help='Assume network resources have already been provisioned')\n parser.add_argument(\n '--prefix', action='store',\n default=None,\n help='prefix used to tag the resources created'\\\n ' (defaults to config name)')\n parser.add_argument(\n '--config', action='store',\n default=os.path.join(os.getenv('HOME'), '.aws', APP_NAME),\n help='configuration file')\n\n args = parser.parse_args(input_args[1:])\n run_config(args.config,\n create_ami=args.create_ami,\n local_docker=args.local_docker,\n include_apps=args.include_apps,\n skip_create_network=args.skip_create_network,\n tag_prefix=args.prefix,\n dry_run=args.dry_run)", "def __init__(self, repo_config: Repository, s3_client: Client, s3_bucket: str):\n self.repo_config = repo_config\n self.s3_client = s3_client\n self.s3_bucket = s3_bucket", "def __init__(self, *, bucket_arn: typing.Optional[str]=None, bucket_domain_name: typing.Optional[str]=None, bucket_dual_stack_domain_name: typing.Optional[str]=None, bucket_name: typing.Optional[str]=None, bucket_regional_domain_name: typing.Optional[str]=None, bucket_website_new_url_format: typing.Optional[bool]=None, bucket_website_url: typing.Optional[str]=None, encryption_key: typing.Optional[aws_cdk.aws_kms.IKey]=None):\n self._values = {\n }\n if bucket_arn is not None: self._values[\"bucket_arn\"] = bucket_arn\n if bucket_domain_name is not None: self._values[\"bucket_domain_name\"] = bucket_domain_name\n if bucket_dual_stack_domain_name is not None: self._values[\"bucket_dual_stack_domain_name\"] = bucket_dual_stack_domain_name\n if bucket_name is not None: self._values[\"bucket_name\"] = bucket_name\n if bucket_regional_domain_name is not None: self._values[\"bucket_regional_domain_name\"] = bucket_regional_domain_name\n if bucket_website_new_url_format is not None: self._values[\"bucket_website_new_url_format\"] = bucket_website_new_url_format\n if bucket_website_url is not None: self._values[\"bucket_website_url\"] = bucket_website_url\n if encryption_key is not None: self._values[\"encryption_key\"] = encryption_key", "def get_instance_output(instance: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: 
Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceResult]:\n ...", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"file_shares\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"kms_key_name\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"networks\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"satisfies_pzs\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"status_message\"] = None\n __props__.__dict__[\"suspension_reasons\"] = None\n __props__.__dict__[\"tier\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def __init__(__self__,\n resource_name: str,\n args: ScriptArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def __init__(self, memory=4000):\n self.session = boto3.Session()\n self.batch_client = self.session.client(\"batch\")\n self.logs_client = self.session.client(\"logs\")\n self.memory = memory" ]
[ "0.7120333", "0.6446755", "0.53901947", "0.53732514", "0.5315737", "0.5265874", "0.5252197", "0.522644", "0.5211605", "0.521066", "0.5205573", "0.51422757", "0.5054793", "0.5037921", "0.5032993", "0.50301266", "0.49558958", "0.49513885", "0.49364442", "0.491438", "0.49119216", "0.4907558", "0.489213", "0.4874849", "0.4851639", "0.4844455", "0.48054138", "0.4789765", "0.4789765", "0.4789765", "0.4789765", "0.4789765", "0.4789765", "0.47881734", "0.47684053", "0.47539365", "0.4737932", "0.47308198", "0.47287837", "0.47197443", "0.47185835", "0.4707916", "0.47066557", "0.47034925", "0.46905604", "0.46754158", "0.46685168", "0.4661828", "0.46455577", "0.464419", "0.46405876", "0.46401554", "0.46376687", "0.46296415", "0.4619123", "0.46158692", "0.46132213", "0.4608563", "0.45948526", "0.45945263", "0.45845377", "0.45830134", "0.4581225", "0.45794705", "0.4571589", "0.4569982", "0.4554279", "0.45537245", "0.45505232", "0.45489034", "0.45402887", "0.4525247", "0.45219898", "0.45201105", "0.45171505", "0.45103744", "0.4507734", "0.450612", "0.45056474", "0.45036888", "0.45019367", "0.4496425", "0.4495108", "0.4488137", "0.44842175", "0.44781047", "0.44750515", "0.44725975", "0.44661596", "0.4465723", "0.44657084", "0.44620863", "0.44602564", "0.44576785", "0.44568756", "0.44513473", "0.444346", "0.44392583", "0.44346994", "0.4433661" ]
0.81121886
0
Return a usable Boto3 object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output)
def get_boto3(args=None, logger=None, stats=None): return Boto3(**__get_arguments(args, logger, stats))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_boto(args=None, logger=None, stats=None):\n return Boto(**__get_arguments(args, logger, stats))", "def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_boto_cli_arguments().\n # We only need those arguments to create Boto object, nothing else.\n # parse_known_args() return (Namespace, list of unknown arguments),\n # we only care about the Namespace object here.\n args = parser.parse_known_args()[0]\n\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n return {\n 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),\n 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),\n 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),\n 'region': getattr(args, 'boto_region', DEFAULT['region']()),\n 'logger': logger,\n 'stats': stats,\n }", "def get_boto_client(self) -> S3Client:\n if self._boto_client is None:\n config = Config(signature_version=botocore.UNSIGNED)\n self._boto_client = self.session.client(\n \"s3\",\n region_name=settings.S3_REGION,\n endpoint_url=settings.S3_ENDPOINT_URL,\n config=config,\n )\n return self._boto_client", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_S3_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('s3', endpoint_url=endpoint_url)\n return _client", "def main():\n\n parser = get_args()\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.INFO)\n LOG.info('Verbose: on')\n else:\n ## If not verbose, turn down boto3.\n boto3.set_stream_logger(name='boto3', level=logging.WARNING)\n boto3.set_stream_logger(name='botocore', level=logging.WARNING)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n ## Ensure credentials.\n if not args.credentials:\n die_screaming('need a credentials argument')\n LOG.info('Will use credentials: ' + args.credentials)\n ## Ensure directory.\n if not args.directory:\n die_screaming('need a directory argument')\n args.directory = args.directory.rstrip('//')\n LOG.info('Will operate in: ' + args.directory)\n ## Ensure bucket.\n if not args.bucket:\n die_screaming('need a bucket argument')\n bucket, slash, toppath = args.bucket.partition('/')\n if toppath != '':\n LOG.info('Will put to bucket: ' + bucket + '; with path: ' + toppath)\n else:\n LOG.info('Will put to bucket at top level: ' + bucket)\n ## Ensure mimetype metadata.\n if not args.mimetypes:\n LOG.info('Will use internal mimetype defaults')\n else:\n LOG.info('TODO: Will get mimetype metadata from: ' + args.metadata)\n ## Ensure bucket location.\n if not args.location:\n args.location = 'us-east-1'\n LOG.info('Will use S3 bucket location default: ' + args.location)\n else:\n LOG.info('Will use S3 bucket location: ' + args.location)\n\n ## Extract S3 credentials.\n creds = None\n with open(args.credentials) as chandle:\n creds = json.loads(chandle.read())\n #LOG.info(creds)\n\n s3 = boto3.resource('s3', region_name=args.location,\n aws_access_key_id=creds['accessKeyId'],\n aws_secret_access_key=creds['secretAccessKey'])\n\n # s3 = boto3.resource(\"s3\", creds['accessKeyId'], creds['secretAccessKey'])\n\n #s3.Object('mybucket', 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))\n\n ## Walk tree.\n for curr_dir, dirs, files in os.walk(args.directory):\n\n ## We can navigate up if we 
are not in the root.\n relative_to_start = curr_dir.rstrip('//')[len(args.directory):]\n relative_to_start = relative_to_start.lstrip('//')\n LOG.info('curr_dir: ' + curr_dir + ' (' + relative_to_start + ')')\n\n ## Note files and directories.\n for fname in files:\n\n ## Get correct mime type.\n fext = os.path.splitext(fname)[1].lstrip('.')\n mime = MIMES.get('') # start with default\n if MIMES.get(fext, False):\n mime = MIMES.get(fext)\n\n ## Figure out S3 path/key and final filename, keeping in\n ## mind that relative_to_Start can be empty if root.\n s3path = fname\n if relative_to_start:\n s3path = relative_to_start + '/' + fname\n filename = os.path.join(curr_dir, fname)\n\n tags = {}\n if args.number:\n tags['build-number'] = args.number\n if args.pipeline:\n tags['build-pipeline'] = args.pipeline\n tags_str = urllib.parse.urlencode(tags)\n\n ## Visual check.\n LOG.info('file: ' + filename)\n if toppath != '':\n s3path = toppath + '/' + s3path\n LOG.info(' -> [' + bucket + '] ' + s3path + \\\n '(' + mime + ', ' + tags_str + ')')\n\n ## Create the new object that we want.\n s3bucket = s3.Bucket(bucket)\n multipart_upload(filename, s3bucket, s3path, content_type=mime, metadata=tags, policy=\"public-read\")\n\n # newobj = s3.Object(args.bucket, s3path)\n # outfile = open(filename, 'rb')\n # newobj.put(Body=outfile, \\\n # ContentType=mime, \\\n # Metadata=tags,\n # ACL='public-read') #Tagging=tags_str)\n\n # outbod = open(os.path.join(curr_dir, fname), 'rb')\n # .put(Body=outbod, 'rb')\n\n # for dname in dirs:\n # #LOG.info('dir: ' + os.path.join(curr_dir, dname))\n # pass", "def get_s3_client(args: argparse.Namespace) -> botocore.clients.s3:\n\n assert args.s3_region_name is not None, \"set COMPSYN_S3_REGION_NAME\"\n assert args.s3_access_key_id is not None, \"set COMPSYN_S3_ACCESS_KEY_ID\"\n assert args.s3_secret_access_key is not None, \"set COMPSYN_S3_SECRET_ACCESS_KEY\"\n assert args.s3_bucket is not None, \"set COMPSYN_S3_BUCKET\"\n\n return boto3.session.Session().client(\n \"s3\",\n region_name=args.s3_region_name,\n endpoint_url=args.s3_endpoint_url,\n aws_access_key_id=args.s3_access_key_id,\n aws_secret_access_key=args.s3_secret_access_key,\n )", "def aws_cli(args: List[str]):\n\n try:\n text_output = subprocess.check_output(['aws'] + args, text=True)\n except subprocess.CalledProcessError as e:\n raise Exception(f\"failed to call AWS CLI ({e.returncode}): \\n{e.stdout}\\n\\n{e.stderr}\") from e\n\n try:\n json_obj = json.loads(text_output)\n except json.JSONDecodeError as e:\n raise Exception(f\"AWS CLI did not output JSON as expected ({e.msg}). 
Output was:\\n{text_output}\") from e\n\n return json_obj", "def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))", "def bcbio_s3_instance_profile(conn, args):\n import boto\n if hasattr(args, \"nocreate\") and args.nocreate:\n return {\"instance_profile\": \"\"}\n base_name = args.cluster if hasattr(args, \"cluster\") and args.cluster else \"bcbio\"\n name = \"%s_full_s3_access\" % (base_name)\n try:\n ip = conn.get_instance_profile(name)\n except boto.exception.BotoServerError:\n print(\"Instance profile %s doesn't exist, creating\" % name)\n ip = conn.create_instance_profile(name)\n try:\n conn.get_role(name)\n except boto.exception.BotoServerError:\n print(\"Role %s doesn't exist, creating\" % name)\n conn.create_role(name)\n conn.put_role_policy(name, name, S3_POLICY)\n if not tz.get_in([\"get_instance_profile_response\", \"get_instance_profile_result\", \"instance_profile\", \"roles\"],\n ip):\n conn.add_role_to_instance_profile(name, name)\n print(\"Instance profile: %s\" % name)\n return {\"instance_profile\": name}", "def get_aioboto3_version() -> str:\n try:\n from aioboto3 import __version__ as version # type: ignore\n except ImportError:\n raise RuntimeError(\"aioboto3 is not installed\")\n\n return version", "def get_s3_client():\n return boto3.resource('s3')", "def _aws_get_object(bucket, key, request_pays=True, client=None):\n if not client:\n session = boto3_session(region_name=REGION)\n client = session.client(\"s3\")\n\n params = {\"Bucket\": bucket, \"Key\": key}\n if request_pays:\n params[\"RequestPayer\"] = \"requester\"\n response = client.get_object(**params)\n return response[\"Body\"].read()", "def main():\n t0 = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--env', default='LOCAL', help='Enter one of DOCKER, LOCAL or S3')\n parser.add_argument('--bucket-name', help='Enter S3 bucket')\n parser.add_argument('--aws-access-key-id', help='Enter AWS access key id')\n parser.add_argument('--aws-secret-access-key', help='Enter AWS secrest access key')\n parser.add_argument('--aws-region', default='us-west-2', help='Enter AWS region')\n # subparser = parser.add_subparsers(dest='subcommand', help='Can choose bucket name if S3 is chosen')\n # parser_bucket = subparser.add_parser('S3')\n # parser_bucket.add_argument('bucket', help='S3 bucket name')\n args = vars(parser.parse_args())\n args['env'] = args['env'].upper()\n if args['env'] != 'S3' and args['bucket_name']:\n parser.error('Can specify a bucket name with only S3...')\n if args['env'] == 'S3' and not (args['bucket_name'] and \n args['aws_access_key_id'] and\n args['aws_secret_access_key']):\n parser.error('Specify a bucket, access key and secret access key...')\n # print(args)\n # print(args['env'])\n # print(args['subcommand'])\n\n if args['env'] == 'S3' and args['aws_region'] != '':\n s3_client = create_client(\n \"s3\",\n region=args['aws_region'],\n access_key_id=args['aws_access_key_id'],\n secret_access_key=args['aws_secret_access_key']\n )\n os.environ['AWS_ACCESS_KEY_ID'] = args['aws_access_key_id'].strip()\n os.environ['AWS_SECRET_ACCESS_KEY'] = args['aws_secret_access_key'].strip()\n logger.info('Check to see whether s3 bucket exits...')\n try:\n s3.meta.client.head_bucket(Bucket=args['bucket_name'])\n logger.info(f\"S3 bucket {args['bucket_name']} exits...\")\n except Exception as e:\n logger.warn(f\"Bucket 
{args['bucket_name']} doesn't exist...\")\n logger.info('Creating bucket...')\n create_s3_bucket(s3_client, args['bucket_name'], args['aws_region'])\n\n\n config = configparser.ConfigParser()\n if args['env'] == 'DOCKER':\n CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n elif args['env'] == 'S3':\n obj = s3_client.get_object(Bucket=args['bucket_name'], Key='config/etl_config.cfg')\n try:\n config.read_string(obj['Body'].read().decode())\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n else:\n CFG_FILE = r'/Users/home/Documents/dend/Data-Engineering-ND/Capstone/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n\n sas_jar_ver = config['APP']['sas_jar_ver']\n os.environ['SAS_JAR'] = \".\".join(sas_jar_ver.split('.')[:-1])\n\n if args['env'] == 'DOCKER':\n base_dir = config['DOCKER']['base_dir']\n data_dir = config['DOCKER']['data_dir']\n path = config['DOCKER']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['DOCKER']['dict_dir']\n files = json.loads(config['DOCKER']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['DOCKER']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['DOCKER']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['DOCKER']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['DOCKER']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n elif args['env'] == 'S3':\n bucket = args['bucket_name']\n path = config['S3']['s3_sas_key']\n dict_dir = config['S3']['s3_dict_key']\n csv_dir = config['S3']['s3_csv_key']\n sas_file_path = os.path.join(\"s3a://\", bucket, csv_dir, path)\n files = json.loads(config['S3']['input_files'])\n airport_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['airports_file'])\n demographic_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['us_demographics_file'])\n dictionary_file = os.path.join(\"s3a://\", bucket, config['S3']['dictionary_file'])\n output_dir = os.path.join(\"s3a://\", bucket, config['S3']['output_dir'])\n else:\n base_dir = config['LOCAL']['base_dir']\n data_dir = config['LOCAL']['data_dir']\n path = config['LOCAL']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['LOCAL']['dict_dir']\n files = json.loads(config['LOCAL']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['LOCAL']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['LOCAL']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['LOCAL']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['LOCAL']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n \n try:\n # Log file written to Hadoop EMR env\n base_dir = config['HADOOP']['base_dir']\n log_dir = os.path.join(base_dir, config['HADOOP']['log_dir'])\n log_file = config['HADOOP']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n except:\n base_dir = 
config['LOCAL']['base_dir']\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n\n\n logger.info('ETL parsing has started...')\n logger.info(\"Create output dir if it doesn't exist...\")\n if args['env'] != 'S3':\n pathlib.Path(output_dir).mkdir(exist_ok=True)\n else:\n # config.set('S3', 's3_bucket_name', args['bucket_name'])\n # s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['config_dir'], Body=)\n s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['output_dir'])\n logger.info('Created S3 bucket...')\n \n spark = create_spark_session()\n logger.info('Pyspark session created...')\n logger.info('Register UDFs...')\n \n spark.udf.register('SASDateConverter', sas_date_converter, Date())\n logger.info('Register sas_date_converter UDF...')\n\n # change_date_format_1 = F.udf(lambda x: datetime.strptime(x.strip(), '%Y%m%d'), Date())\n # change_date_format_2 = F.udf(lambda x: datetime.strptime(x.strip(), '%m%d%Y'), Date())\n dt = F.udf(change_date_format, Date())\n\n logger.info('Read and concatenate the raw SAS files...')\n dfs = []\n for file in files:\n try:\n df = spark.read.format('com.github.saurfang.sas.spark')\\\n .load(os.path.join(sas_file_path, file))\n dfs.append(df)\n except Exception as e:\n logger.info(f'File {file} is not available. Skipping...')\n logger.info(f'Read {len(files)} files successfully...')\n df = []\n if len(dfs) > 0:\n df = concat_df(*dfs)\n logger.info(f'Successfully concatenated {len(files)}...')\n if not isinstance(df, list):\n # SAS raw data table creation begins here\n cols = ['cicid', 'i94yr', 'i94mon', 'i94port', 'i94mode', 'visapost', \n 'entdepa', 'entdepd', 'entdepu', 'matflag', \n 'dtadfile', 'dtaddto']\n parquet_tables = ['i94_immigrations', 'i94_trips', 'i94_visitors', 'i94_flights']\n f_transforms = [i94_immigrations, i94_trips, i94_visitors, i94_flights]\n res_df = None\n for table, f_transform in zip(parquet_tables, f_transforms):\n if table == 'i94_immigrations':\n # only table not using spark sql\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=None, cols=cols,\n udf=dt, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n elif table == 'i94_flights':\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='csv',\n is_partition=False,\n is_overwrite=True,\n crate_date_df=False)\n else:\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n if table == 'i94_trips':\n table = 'i94_dates'\n create_and_write_df(res_df, table, i94_dates, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n # Reference data for airports and us city demographics begins here\n airport_df = spark.createDataFrame([], R([]))\n demographic_df = spark.createDataFrame([], R([]))\n logger.info('Read the airports reference file...')\n try:\n airport_df = spark.read.option('header', True) \\\n .csv(airport_file)\n except Exception as e:\n logger.error(f'File {airport_file} is not available. 
Skipping...')\n\n logger.info('Read the US demographics reference file...')\n try:\n demographic_df = spark.read.options(header='True', delimiter=';') \\\n .csv(demographic_file) \n except Exception as e:\n logger.error(f'File {demographic_file} is not available. Skipping...')\n if airport_df.count() > 0 and demographic_df.count() > 0: \n csv_tables = ['i94_airports', 'i94_us_states_demographic', \n 'i94_us_cities_demographic']\n f_transforms = [i94_airports, i94_us_states_demographic, i94_us_cities_demographic]\n csv_dfs = [airport_df, demographic_df, demographic_df]\n for table, f_transform, df in zip(csv_tables, f_transforms, csv_dfs):\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=dt, fmt='csv',\n is_partition=False,\n is_overwrite=True)\n\n # SAS reference data creation begins here\n ref_csv_tables = ['i94_countries', 'i94_port_state_mapping', 'i94_travel_mode', \n 'i94_state_mapping', 'i94_visa']\n table_pos_dict = {\n 'i94_countries': [2, 3, 'country', 'country_id'],\n 'i94_port_state_mapping': [3, 4, 'city', 'i94_port'],\n 'i94_travel_mode': [4, 5, 'mode', 'mode_id'],\n 'i94_state_mapping': [5, 6, 'state', 'state_id'],\n 'i94_visa': [6, 7, 'visa_purpose', 'visa_id']\n }\n logger.info('Read the SAS data dictionary reference file...') \n for table in ref_csv_tables:\n create_and_write_ref_df(dictionary_file, table, output_dir, spark, \n fmt='csv', start_pos=table_pos_dict[table][0], \n end_pos=table_pos_dict[table][1],\n col_name=table_pos_dict[table][2], \n index_name=table_pos_dict[table][3],\n is_partition=False,\n is_overwrite=True)\n\n logger.info('ETL parsing has completed...')\n logger.info('Time taken to complete job {} minutes'.format((time.time() - t0) / 60))", "def __init__(self, bucket, aws_profile=None, logger=None):\n self.bucket = bucket\n self.s3helper = S3Helper(aws_profile=aws_profile)\n self.print_func = print\n if logger:\n self.print_func = logger.info", "def get_s3_args(\n parser: Optional[argparse.ArgumentParser] = None,\n) -> argparse.ArgumentParser:\n\n if parser is None:\n parser = argparse.ArgumentParser()\n\n s3_parser = parser.add_argument_group(\"s3\")\n\n s3_parser.add_argument(\n \"--s3-bucket\",\n type=str,\n action=env_default(\"COMPSYN_S3_BUCKET\"),\n required=False,\n help=\"bucket where img data is stored in S3\",\n )\n s3_parser.add_argument(\n \"--s3-region-name\",\n type=str,\n required=False,\n action=env_default(\"COMPSYN_S3_REGION_NAME\"),\n help=\"S3 region\",\n )\n s3_parser.add_argument(\n \"--s3-endpoint-url\",\n action=env_default(\"COMPSYN_S3_ENDPOINT_URL\"),\n required=False,\n help=\"S3 endpoint URL (only required for non-AWS S3)\",\n )\n s3_parser.add_argument(\n \"--s3-access-key-id\",\n type=str,\n action=env_default(\"COMPSYN_S3_ACCESS_KEY_ID\"),\n required=False,\n )\n s3_parser.add_argument(\n \"--s3-secret-access-key\",\n type=str,\n action=env_default(\"COMPSYN_S3_SECRET_ACCESS_KEY\"),\n required=False,\n )\n\n return parser", "def __init__(\n self,\n service_name: str,\n account_id: str,\n region_name: Optional[str] = None,\n aws_creds: Optional[Dict[str, str]] = None,\n profile_name: Optional[str] = None,\n placebo: Optional[Any] = None,\n placebo_data_path: Optional[str] = None,\n placebo_mode: Optional[str] = \"record\",\n max_attempts: int = 20,\n config: Optional[Config] = None,\n max_attempts_on_client_error: int = 10,\n ):\n self._service_name = service_name\n self._region_name = region_name\n self._account_id = account_id\n self._max_attempts_on_client_error = 
max_attempts_on_client_error\n\n # Build a clojure in order to recreate boto3 client if needed\n\n def _create_client(service: str = None):\n return get_client(\n session=get_session(\n aws_creds=aws_creds,\n profile_name=profile_name,\n placebo=placebo,\n placebo_data_path=placebo_data_path,\n placebo_mode=placebo_mode,\n ),\n service_name=service if service else service_name,\n region_name=region_name,\n max_attempts=max_attempts,\n config=config,\n )\n\n # set client factory\n self.create_client = _create_client\n\n # Build boto3 client\n self._client = self.create_client()", "def _get_client(\n session: Optional[boto3.Session] = None, region: Optional[str] = None\n) -> S3Client:\n return session.client(\"s3\") if session else boto3.client(\"s3\", region_name=region)", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def get_conn():\n global S3Conn\n\n S3Conn = tinys3.Connection(plug.options['aws_access_key'],\n plug.options['aws_secret_key'],\n default_bucket=plug.options['bucket'], tls=True)\n # Check that the given bucket exists by doing a HEAD request\n try:\n S3Conn.head_bucket()\n except requests.HTTPError as httpe:\n err = u\"Cannot reach Onitu bucket {}\".format(plug.options['bucket'])\n if httpe.response.status_code == 404:\n err += u\": The bucket doesn't exist.\"\n if httpe.response.status_code == 403:\n err += u\": Invalid credentials.\"\n err += u\" Please check your Amazon S3 configuration - {}\".format(httpe)\n raise DriverError(err)\n plug.logger.debug(\"Connection with Amazon S3 account successful\")\n return S3Conn", "def boto_client(account_id, service_name, region):\n logger.info('Creating boto3 client for account_id: {}, '\n 'service_name: {}'.format(account_id, service_name))\n return boto3.client(service_name, region_name=region)", "def get_boto3_version() -> str:\n return boto3_version", "def __get_s3_client(self):\n if self.AWS_ACCESS_KEY:\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=self.AWS_ACCESS_KEY,\n aws_secret_access_key=self.AWS_SECRET_ACCESS_KEY,\n )\n else:\n s3_client = boto3.client(\"s3\")\n return s3_client", "def _get_s3_object(self, s3_path):\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n return self.s3_resource.Object(bucket_name, key)", "def __init__(self):\n self.s3_resource = boto3.resource('s3')\n self.s3_client = boto3.client('s3')", "def s3_client(self):\n return boto3.client('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def mock_s3_boto_returns() -> callable:\n\n def client(aws_res, aws_access_key_id=None, aws_secret_access_key=None):\n return BotoMockReturns()\n\n return client", "def get_boto3_client(\n *,\n aws_lambda_mode: bool,\n service_name: str,\n profile_name: str = 'kreodont',\n connect_timeout: float = 0.2,\n read_timeout: float = 0.4,\n) -> Optional[boto3.client]:\n known_services = ['translate', 'dynamodb', 's3']\n if service_name in global_cached_boto3_clients:\n print(f'{service_name} client taken from cache!')\n return global_cached_boto3_clients[service_name]\n\n if service_name not in known_services:\n raise Exception(\n f'Not known service '\n f'name {service_name}. 
The following '\n f'service names known: {\", \".join(known_services)}')\n\n if aws_lambda_mode:\n client = boto3.client(\n service_name,\n config=botocore.client.Config(\n connect_timeout=connect_timeout,\n read_timeout=read_timeout,\n parameter_validation=False,\n retries={'max_attempts': 0},\n ),\n )\n else:\n client = boto3.Session(profile_name=profile_name).client(service_name)\n return client\n\n # saving to cache to to spend time to create it next time\n global_cached_boto3_clients[service_name] = client\n return client", "def _get_buckets():\n\n return __opts__[\"s3.buckets\"] if \"s3.buckets\" in __opts__ else {}", "def __init__(self):\n self.aws = AWS()", "def get_elb(args=None, logger=None, stats=None):\n if not args:\n parser = get_parser()\n add_elb_cli_arguments(parser)\n args = parser.parse_args()\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n boto = Boto3(\n log_level=args.boto_log_level,\n access_key=args.boto_access_key,\n secret_key=args.boto_secret_key,\n region=args.boto_region,\n logger=logger,\n stats=stats,\n )\n return ELB(\n boto=boto,\n logger=logger,\n stats=stats,\n )", "def extract_aws_metadata(wrapped, instance, args, kwargs, return_value):\n response = return_value\n LOGGER.debug(\n \"Extracting AWS metadata\", args=args, kwargs=kwargs,\n )\n if \"operation_name\" in kwargs:\n operation_name = kwargs[\"operation_name\"]\n else:\n operation_name = args[0]\n\n # Most of the time the actual keyword arguments to the client call are\n # passed in as a positial argument after the operation name.\n if len(kwargs) == 0 and len(args) == 2:\n kwargs = args[1]\n\n region_name = instance._client_config.region_name\n\n response_metadata = response.get(\"ResponseMetadata\")\n\n metadata = {\"aws\": {\"operation\": operation_name, \"region\": region_name}}\n\n if \"TableName\" in kwargs:\n metadata[\"aws\"][\"table_name\"] = kwargs[\"TableName\"]\n if \"QueueUrl\" in kwargs:\n metadata[\"aws\"][\"queue_url\"] = kwargs[\"QueueUrl\"]\n\n if response_metadata is not None:\n metadata[\"http\"] = {\n \"response\": {\"status\": response_metadata[\"HTTPStatusCode\"]},\n }\n metadata[\"aws\"][\"request_id\"] = response_metadata[\"RequestId\"]\n\n return metadata", "def get_bucket(bucket):\n if isinstance(bucket, Bucket):\n return bucket\n if isinstance(bucket, str):\n return setup_bucket(bucket)\n else:\n raise TypeError(\"Expected bucket to be Bucket or str was %s \" % type(bucket))", "def _get_s3(key=None, username=None, secret=None, password=None, **kwargs):\n if username is not None:\n if key is not None:\n raise KeyError(\"S3 storage options got secrets argument \"\n \"collision. Please, use either `key` \"\n \"storage option or password field in URLpath, \"\n \"not both options together.\")\n key = username\n if key is not None:\n kwargs['key'] = key\n if password is not None:\n if secret is not None:\n raise KeyError(\"S3 storage options got secrets argument \"\n \"collision. 
Please, use either `secret` \"\n \"storage option or password field in URLpath, \"\n \"not both options together.\")\n secret = password\n if secret is not None:\n kwargs['secret'] = secret\n return S3FileSystem(**kwargs)", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def _get_s3_key():\n\n key = __opts__[\"s3.key\"] if \"s3.key\" in __opts__ else None\n keyid = __opts__[\"s3.keyid\"] if \"s3.keyid\" in __opts__ else None\n service_url = __opts__[\"s3.service_url\"] if \"s3.service_url\" in __opts__ else None\n verify_ssl = __opts__[\"s3.verify_ssl\"] if \"s3.verify_ssl\" in __opts__ else None\n kms_keyid = __opts__[\"aws.kmw.keyid\"] if \"aws.kms.keyid\" in __opts__ else None\n location = __opts__[\"s3.location\"] if \"s3.location\" in __opts__ else None\n path_style = __opts__[\"s3.path_style\"] if \"s3.path_style\" in __opts__ else None\n https_enable = (\n __opts__[\"s3.https_enable\"] if \"s3.https_enable\" in __opts__ else None\n )\n\n return (\n key,\n keyid,\n service_url,\n verify_ssl,\n kms_keyid,\n location,\n path_style,\n https_enable,\n )", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def __init__(self):\n super(AWSBase, self).__init__()\n self.region = config.ENV_DATA['region']\n self.aws = AWSUtil(self.region)", "def __initiate_s3client():\n boto3.setup_default_session(region_name=env.get('region'))\n s3client = boto3.client(\n 's3',\n aws_access_key_id=env.get('access_key_id'),\n aws_secret_access_key=env.get('secret_access_key')\n )\n return s3client", "def _get_resource(\n session: Optional[boto3.Session] = None, region: Optional[str] = None\n) -> S3ServiceResource:\n return (\n session.resource(\"s3\") if session else boto3.resource(\"s3\", region_name=region)\n )", "def cli(profile, region, clear):\n global SESSION, BUCKET_MANAGER, DOMAIN_MANAGER, CERT_MANAGER, \\\n DIST_MANAGER, EC2_MANAGER, ECS_MANAGER\n session_cfg = {}\n if profile:\n session_cfg['profile_name'] = profile\n\n if region:\n session_cfg['region_name'] = region\n\n if clear:\n util.clear_scr()\n\n# using **<variable> python expands it as a parameter=content\n SESSION = boto3.Session(**session_cfg)\n BUCKET_MANAGER = BucketManager(SESSION)\n DOMAIN_MANAGER = DomainManager(SESSION)\n CERT_MANAGER = CertificateManager(SESSION)\n DIST_MANAGER = DistributionManager(SESSION)\n EC2_MANAGER = EC2Manager(SESSION)\n ECS_MANAGER = ECSManager(SESSION)", "def mock_amazon():\n amazon = Amazon()\n amazon.carrot1 = 'cenoura normal'\n amazon.carrot2 = 'cenoura radioativa'\n amazon.carrot_number = 575\n return amazon", "def boto3_client(service='ec2'):\n\n def wrapper(func):\n @wraps(func)\n def wrapped_func(user, *args, **kwargs):\n region = request.args.get('region') or 'eu-west-1'\n client = get_client(user['access_key'], user['secret_key'],\n region=region, service=service)\n return func(user, client, *args, **kwargs)\n\n return wrapped_func\n\n return wrapper", "def __init__(self, access_key, secret_key, bucket):\n # amazon s3 connection\n self.s3_conn = None\n self.__set_amazon_s3_service__(access_key, secret_key)\n\n # 
data bucket to be used\n self.bucket = self.s3_conn.get_bucket(bucket)", "def __init__(self, repo_config: Repository, s3_client: Client, s3_bucket: str):\n self.repo_config = repo_config\n self.s3_client = s3_client\n self.s3_bucket = s3_bucket", "def __init__(self, name: str, args: S3Args, opts: ResourceOptions = None):\n super().__init__(\"custom:resource:S3\", name, {}, opts)\n \"\"\"Override ComponentResource class constructor\"\"\"\n\n self.bucket_final = Output.all(\n args.project_name,\n args.bucket_name\n ).apply(\n lambda arg: f\"{arg[0]}-{arg[1]}\"\n )\n\n self.bucket = aws.s3.Bucket(\n args.bucket_name,\n bucket=self.bucket_final,\n acl=\"private\",\n tags={\n \"BillingCode\": args.billing_code,\n \"Name\": self.bucket_final,\n \"Project\": args.project_name,\n },\n server_side_encryption_configuration={\n \"rule\": {\n \"applyServerSideEncryptionByDefault\": {\n \"sseAlgorithm\": \"AES256\",\n },\n },\n },\n opts=ResourceOptions(parent=self)\n )\n\n self.deny_vpce_policy = Output.all(\n args.ec2_role_arn,\n self.bucket.arn,\n args.vpc_endpoint_id\n ).apply(\n lambda args:\n aws.iam.get_policy_document(\n version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Access-to-specific-VPCE-only\",\n principals=[\n aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n identifiers=[args[0]],\n type=\"AWS\",\n )\n ],\n actions=[\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:ListBucket\",\n \"s3:PutObject\",\n \"s3:RestoreObject\",\n ],\n effect=\"Deny\",\n resources=[\n args[1],\n args[1]+\"/*\"\n ],\n conditions=[\n aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"StringNotEquals\",\n values=[args[2]],\n variable=\"aws:sourceVpce\",\n )\n ],\n )\n ],\n opts=ResourceOptions(parent=self.bucket)\n )\n )\n\n admin_principals = []\n for admin in args.admin_list:\n admin_principals.append(f\"arn:aws:iam::{current_id}:user/{admin}\")\n\n self.admin_access_policy = Output.all(self.bucket.arn).apply(\n lambda args:\n aws.iam.get_policy_document(\n version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"admin-access\",\n principals=[\n aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n identifiers=admin_principals,\n type=\"AWS\",\n )\n ],\n actions=[\"s3:*\"],\n effect=\"Allow\",\n resources=[\n args[0],\n args[0]+\"/*\"\n ],\n )\n ],\n opts=ResourceOptions(parent=self.bucket)\n )\n )\n\n self.policy = aws.s3.BucketPolicy(\n f'{args.bucket_name}-policy',\n bucket=self.bucket.id,\n policy=aws.iam.get_policy_document(\n source_json=self.deny_vpce_policy.json,\n override_json=self.admin_access_policy.json,\n ).json,\n opts=ResourceOptions(parent=self.bucket)\n )\n\n self.register_outputs({})", "def read_command_line():\n\n parser = argparse.ArgumentParser(\n description='Mount an S3 bucket as a read-only filesystem')\n\n # All arguments must default to None so that they can be filtered\n # out of the returned dictionary; otherwise, the argument defaults\n # will override settings from the configuration file.\n parser.add_argument('mount-point',\n help='where to mount the bucket')\n parser.add_argument('--bucket', dest='bucket',\n help='S3 bucket to mount')\n parser.add_argument('--access-key', dest='access-key',\n help='access key for the bucket')\n parser.add_argument('--secret-key', dest='secret-key',\n help='secret key for the bucket')\n\n parser.add_argument('--config-file', dest='config-file',\n default='~/.s3viewport.yaml',\n help='path to the configuration file')\n\n parser.add_argument('--no-input', 
dest='no-input',\n action='store_true', default=None,\n help=\"don't prompt for missing information\")\n parser.add_argument('--foreground', dest='foreground',\n action='store_true', default=None,\n help='run filesystem server in the foreground')\n\n # TODO: Describe configuration file format\n\n args = parser.parse_args()\n return filter_dict(vars(args), lambda k, v: v is not None)", "def get_s3_client(profile_name):\n try:\n session = boto3.session.Session(profile_name=profile_name)\n except ProfileNotFound as e:\n print(e, file=sys.stderr)\n raise FailureException from e\n return session.resource('s3')", "def xray_botocore_api_call(wrapped, instance, args, kwargs):\n return generic_xray_wrapper(\n wrapped,\n instance,\n args,\n kwargs,\n name=get_service_name,\n namespace=\"aws\",\n metadata_extractor=extract_aws_metadata,\n error_handling_type=ERROR_HANDLING_BOTOCORE,\n )", "def __init__(__self__, resource_name, opts=None, aws_kms_key_arn=None, content_config=None, content_config_permissions=None, input_bucket=None, name=None, notifications=None, output_bucket=None, role=None, thumbnail_config=None, thumbnail_config_permissions=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['aws_kms_key_arn'] = aws_kms_key_arn\n __props__['content_config'] = content_config\n __props__['content_config_permissions'] = content_config_permissions\n if input_bucket is None:\n raise TypeError(\"Missing required property 'input_bucket'\")\n __props__['input_bucket'] = input_bucket\n __props__['name'] = name\n __props__['notifications'] = notifications\n __props__['output_bucket'] = output_bucket\n if role is None:\n raise TypeError(\"Missing required property 'role'\")\n __props__['role'] = role\n __props__['thumbnail_config'] = thumbnail_config\n __props__['thumbnail_config_permissions'] = thumbnail_config_permissions\n __props__['arn'] = None\n super(Pipeline, __self__).__init__(\n 'aws:elastictranscoder/pipeline:Pipeline',\n resource_name,\n __props__,\n opts)", "def boto_init_s3(bucket_name):\n c = boto.connect_s3(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)\n b = c.get_bucket(bucket_name)\n\n return b", "def get_object(Bucket=None, IfMatch=None, IfModifiedSince=None, IfNoneMatch=None, IfUnmodifiedSince=None, Key=None, Range=None, ResponseCacheControl=None, ResponseContentDisposition=None, ResponseContentEncoding=None, ResponseContentLanguage=None, ResponseContentType=None, ResponseExpires=None, VersionId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, RequestPayer=None, PartNumber=None):\n pass", "def get_config():\n\n # Retrieve config from S3\n config = get_json(BUCKET, CONFIG_S3_KEY)\n\n # If config does not currently exist, create a default one and save to S3\n if not config:\n config = 
generate_config()\n save_config(config)\n\n return config", "def fetch_boto3_client(service_name: str):\n region_name = load_aws_region_name()\n cache_key = f\"{region_name}-{service_name}\"\n\n if CLIENT_CACHE.get(cache_key):\n return CLIENT_CACHE[cache_key]\n\n config = Config(\n region_name=region_name,\n signature_version=\"v4\",\n retries={\"max_attempts\": 10, \"mode\": \"standard\"},\n )\n client = boto3.client(service_name, config=config) # type: ignore\n\n CLIENT_CACHE[cache_key] = client\n\n return client", "def get_client(access_key, secret_key, region='eu-west-1', service='ec2'):\n return boto3.client(\n service,\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n region_name=region\n )", "def aws():\n pass", "def stubbed_s3_client():\n stubber = stub.Stubber(app.S3_CLIENT)\n\n yield stubber\n\n stubber.deactivate()", "def s3_resource(self):\n return boto3.resource('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def __init__(self,region=None, bucket=None, keyPrefix='logs', fqdn=None):\n object.__init__(self)\n \n if (not region):\n raise MissingArgumentException(\"The AWS region name must be provided.\")\n #endIf\n self.region = region\n \n if (not bucket):\n raise MissingArgumentException(\"The S3 bucket name for the exported logs must be provided.\")\n #endIf\n self.bucket = bucket\n \n self.keyPrefix = keyPrefix\n \n if (not fqdn):\n raise MissingArgumentException(\"The FQDN of the node exporting the logs must be provided.\")\n #endIf\n self.fqdn = fqdn\n \n self.s3Helper = S3Helper(region=region)\n \n if (not self.s3Helper.bucketExists(bucket)):\n self.s3Helper.createBucket(bucket,region=region)\n #endIf", "def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None,\n host=None, debug=0, https_connection_factory=None,\n calling_format=None, path='/', provider='aws',\n bucket_class=None, mock_s3_fs=None):\n # use mock_s3_fs even if it's {}\n self.mock_s3_fs = combine_values({}, mock_s3_fs)\n self.endpoint = host or 's3.amazonaws.com'", "def get_boto3_bucket(bucket_name): # pragma: no cover\n raise DeprecationWarning(\"get_boto3_bucket() is deprecated\")", "def botoconn(args):\n try:\n return boto.ec2.autoscale.connect_to_region(args.region)\n except:\n print(\"FATAL ERROR:\")\n traceback.print_exc(file=sys.stdout)\n sys.exit(\"Failed to connect to AWS. 
Did you set the shell vars right?\")", "def make_sdk(options=None, **kwargs):\n from openstack import connection\n cloud = get_config(options=options, **kwargs)\n return connection.from_config(cloud_config=cloud, options=options)", "def __init__(self, *, bucket_arn: typing.Optional[str]=None, bucket_domain_name: typing.Optional[str]=None, bucket_dual_stack_domain_name: typing.Optional[str]=None, bucket_name: typing.Optional[str]=None, bucket_regional_domain_name: typing.Optional[str]=None, bucket_website_new_url_format: typing.Optional[bool]=None, bucket_website_url: typing.Optional[str]=None, encryption_key: typing.Optional[aws_cdk.aws_kms.IKey]=None):\n self._values = {\n }\n if bucket_arn is not None: self._values[\"bucket_arn\"] = bucket_arn\n if bucket_domain_name is not None: self._values[\"bucket_domain_name\"] = bucket_domain_name\n if bucket_dual_stack_domain_name is not None: self._values[\"bucket_dual_stack_domain_name\"] = bucket_dual_stack_domain_name\n if bucket_name is not None: self._values[\"bucket_name\"] = bucket_name\n if bucket_regional_domain_name is not None: self._values[\"bucket_regional_domain_name\"] = bucket_regional_domain_name\n if bucket_website_new_url_format is not None: self._values[\"bucket_website_new_url_format\"] = bucket_website_new_url_format\n if bucket_website_url is not None: self._values[\"bucket_website_url\"] = bucket_website_url\n if encryption_key is not None: self._values[\"encryption_key\"] = encryption_key", "def create_boto_session(account):\n aws_access_key_id = account['aws_access_key_id']\n aws_secret_access_key = account['aws_secret_access_key']\n region = account['region']\n #aws_profile = account['aws_profile']\n\n\n session = boto3.Session(\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n region_name=region,\n #profile_name=aws_profile,\n )\n\n return session", "def s3(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"s3\")", "def get_s3_connection(self):\n return connection.S3Connection(\n config.get('nereid_s3', 'access_key'),\n config.get('nereid_s3', 'secret_key')\n )", "def setup_s3_client(job_data):\n key_id = job_data['artifactCredentials']['accessKeyId']\n key_secret = job_data['artifactCredentials']['secretAccessKey']\n session_token = job_data['artifactCredentials']['sessionToken']\n\n session = Session(aws_access_key_id=key_id,\n aws_secret_access_key=key_secret,\n aws_session_token=session_token)\n return session.client('s3', config=botocore.client.Config(signature_version='s3v4'))", "def __init__(\n self, source, history_path, profile_name, print_request_time=False, output_format=formatter.JSON, headers=None\n ):\n self.history_path = history_path\n self.output_format = output_format\n self.print_request_time = print_request_time\n self.profile_name = profile_name\n self.config_file_path = os.path.join(os.path.expanduser(\"~\"), \".open-cli3-config/config.cfg\")\n\n self.logger = logging.getLogger(\"open-cli3\")\n self.logger.debug(\n \"Creating a python client based on %s, headers: %s\", source, headers\n )\n\n headers = self._parse_headers(headers)\n\n # parse profile_name and/or source attributes\n endpoint_opt = CONFIG_OPTIONS[0]\n if profile_name:\n config_obj = self._get_config_object(self.config_file_path)\n endpoint = \"\"\n if config_obj:\n endpoint = self._get_option_from_config_obj(config_obj, endpoint_opt)\n else:\n self.logger.debug(\"You don't have open-cli3 config file, so we will use source attribute instead\")\n if source:\n endpoint = source\n 
else:\n self.logger.debug(\"You don't have open-cli3 config file for profile name and you additionally \"\n \"didn't provide source attribute instead\")\n elif source:\n endpoint = source\n else:\n raise Exception(\"You should specify at least source or profile name (if exists) \"\n \"in order to run open-cli3. Check 'help' (-h, --help) for more information\")\n\n # Handle non-url sources\n spec = None\n if os.path.exists(endpoint):\n with open(endpoint) as f:\n spec = yaml.safe_load(f.read())\n\n if not spec:\n spec = requests.get(endpoint).json()\n self.client = OpenAPIExt(spec)\n\n # Get the CLI prompt name from the spec title\n self.name = self.client.info.title\n\n # Initialize a command parser based on the client\n self.command_parser = parser.CommandParser(client=self.client)", "def __init__(self, project_id, bucket_name):\n self.project_id = project_id\n self.bucket_name = bucket_name\n self.client = storage.Client(project=project_id)\n self.bucket = self.client.get_bucket(bucket_name)", "def mock_s3_boto_raise() -> callable:\n\n def client(aws_res, aws_access_key_id=None, aws_secret_access_key=None):\n return BotoMockRaise()\n\n return client", "def default_glacier_wrapper(args, **kwargs):\n return GlacierWrapper(args.aws_access_key,\n args.aws_secret_key,\n args.region,\n bookkeeping=args.bookkeeping,\n no_bookkeeping=args.no_bookkeeping,\n bookkeeping_domain_name=args.bookkeeping_domain_name,\n sdb_access_key=args.sdb_access_key,\n sdb_secret_key=args.sdb_secret_key,\n sdb_region=args.sdb_region,\n # sns_enable=args.sns_enable,\n # sns_topic=args.sns_topic,\n # sns_monitored_vaults=args.sns_monitored_vaults,\n # sns_options=args.sns_options,\n # config_object=args.config_object,\n logfile=args.logfile,\n loglevel=args.loglevel,\n logtostdout=args.logtostdout)", "def aws(ctx): # pylint: disable=unused-argument\n pass # pylint: disable=unnecessary-pass", "def __init__(__self__,\n resource_name: str,\n args: BucketACLArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, service, acces_key, secret_key):\n \n self.client = boto3.client(\n service,\n aws_access_key_id=acces_key,\n aws_secret_access_key=secret_key,\n )", "def parse_args():\n logging.getLogger().debug(\"parse_args()\")\n parser = argparse.ArgumentParser(description='AWS instance health')\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='verbosity level, specify multiple')\n parser.add_argument('--clusterid', default=\"default\",\n help='clusterid')\n parser.add_argument('--namespace', default=\"default\",\n help='Project namespace')\n args = parser.parse_args()\n if args.verbose > 0:\n logging.getLogger().setLevel(logging.DEBUG)\n return args", "def connect():\n # Reduce the number of retries to 1 if it's not set already so requests\n # fail quickly rather than delaying the downloading of photos\n if not boto.config.has_option('Boto', 'num_retries'):\n if not boto.config.has_section('Boto'):\n boto.config.add_section('Boto')\n boto.config.set('Boto', 'num_retries', '1')\n cfg = settings.config()\n try:\n aws_access_key = cfg.get('s3', 'access_key')\n aws_secret_key = cfg.get('s3', 'secret_key')\n aws_s3_bucket = cfg.get('s3', 'bucket')\n except NoOptionError as e:\n l.error(\"Error reading a setting from the config.cfg file: %s\", e)\n raise\n conn = S3Connection(aws_access_key, aws_secret_key)\n bucket = conn.get_bucket(aws_s3_bucket, validate=False)\n return bucket", "def test_s3_get_bucket_info(self, mock_class):\n\n mock_class().list_objects.return_value = 
list(s3_fake_objects[\"bucket1\"].keys())\n mock_class().stat_object.side_effect = list(s3_fake_objects[\"bucket1\"].values())\n return_value = servicex_storage.s3_storage_manager.BucketInfo(name=\"bucket1\",\n size=60,\n last_modified=datetime.datetime(\n year=2021, month=10,\n day=1, hour=10,\n minute=10, second=10))\n test_obj = servicex_storage.s3_storage_manager.S3Store(s3_endpoint=\"abc\",\n access_key=\"abc\",\n secret_key=\"abc\")\n bucket_info = test_obj.get_bucket_info(\"bucket1\")\n self.assertEqual(bucket_info, return_value)", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def make_s3(sitename):\n return s3.S3(sitename)", "def s3_client(s3_url):\n with moto.mock_s3():\n with clients.S3Client(s3_url) as client:\n yield client", "def cli():\n parser=argparse.ArgumentParser(\n description = 'Rotate through a given AWS account for per application keys. Keys are temporarily loaded into environment variables. Asks for a SSO cookie value.')\n parser.add_argument('role', help = 'Role to harvest session keys as')\n parser.add_argument(\n '-c', '--command', help = 'Custom command to run.', default = None)\n parser.add_argument('-a', '--application',\n help = 'Provide a specific application', default = None)\n parser.add_argument(\n '-l', '--list', help = 'Provide a list of applications. Lists should be one Application#,Application Name per line', default = None)\n parser.add_argument(\n '-p', '--awspx', help = 'Run awspx across all applications. Install from https://github.com/FSecureLABS/awspx', action=argparse.BooleanOptionalAction, default = False)\n parser.add_argument(\n '-s', '--scoutsuite', help = 'Run ScoutSuite across all applications. Install from https://github.com/nccgroup/ScoutSuite', action=argparse.BooleanOptionalAction, default = False)\n args=parser.parse_args()\n\n print(\"Please provide an SSO cookie value. 
Obtain from the dev console on a web browser, probably named something like x-amz-sso_authn\")\n token=input()\n\n return args.role, args.list, args.application, args.command, token, args.awspx, args.scoutsuite", "def from_bucket_attributes(cls, scope: aws_cdk.core.Construct, id: str, *, bucket_arn: typing.Optional[str]=None, bucket_domain_name: typing.Optional[str]=None, bucket_dual_stack_domain_name: typing.Optional[str]=None, bucket_name: typing.Optional[str]=None, bucket_regional_domain_name: typing.Optional[str]=None, bucket_website_new_url_format: typing.Optional[bool]=None, bucket_website_url: typing.Optional[str]=None, encryption_key: typing.Optional[aws_cdk.aws_kms.IKey]=None) -> \"IBucket\":\n attrs = BucketAttributes(bucket_arn=bucket_arn, bucket_domain_name=bucket_domain_name, bucket_dual_stack_domain_name=bucket_dual_stack_domain_name, bucket_name=bucket_name, bucket_regional_domain_name=bucket_regional_domain_name, bucket_website_new_url_format=bucket_website_new_url_format, bucket_website_url=bucket_website_url, encryption_key=encryption_key)\n\n return jsii.sinvoke(cls, \"fromBucketAttributes\", [scope, id, attrs])", "def _client(region: str = \"\") -> Any:\n region_name = region or os.environ.get(\"AWS_DEFAULT_REGION\", AWS_DEFAULT_REGION)\n return boto3.client(\"sqs\", region_name=region_name)", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_SNS_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('sns', endpoint_url=endpoint_url)\n return _client", "def getS3Object(self, bucket=None, s3Path=None, destPath=None):\n methodName = \"getS3Object\"\n \n if (not bucket):\n raise MissingArgumentException(\"An S3 bucket name (bucket) must be provided.\")\n #endIf\n \n if (not s3Path):\n raise MissingArgumentException(\"An S3 object key (s3Path) must be provided.\")\n #endIf\n \n if (not destPath):\n raise MissingArgumentException(\"A file destination path (destPath) must be provided.\")\n #endIf\n \n TR.info(methodName, \"STARTED download of object: %s from bucket: %s, to: %s\" % (s3Path,bucket,destPath))\n \n s3url = self.s3.generate_presigned_url(ClientMethod='get_object',Params={'Bucket': bucket, 'Key': s3Path},ExpiresIn=60)\n TR.fine(methodName,\"Getting S3 object with pre-signed URL: %s\" % s3url)\n #endIf\n \n destDir = os.path.dirname(destPath)\n if (not os.path.exists(destDir)):\n os.makedirs(destDir)\n TR.info(methodName,\"Created object destination directory: %s\" % destDir)\n #endIf\n \n r = requests.get(s3url, stream=True)\n with open(destPath, 'wb') as destFile:\n shutil.copyfileobj(r.raw, destFile)\n #endWith\n\n TR.info(methodName, \"COMPLETED download from bucket: %s, object: %s, to: %s\" % (bucket,s3Path,destPath))\n \n return destPath", "def s3_pickle_io_manager(init_context):\n s3_session = init_context.resources.s3\n s3_bucket = init_context.resource_config[\"s3_bucket\"]\n s3_prefix = init_context.resource_config.get(\"s3_prefix\") # s3_prefix is optional\n pickled_io_manager = PickledObjectS3IOManager(s3_bucket, s3_session, s3_prefix=s3_prefix)\n return pickled_io_manager", "def __init__(self, looking_for_tags: dict):\n self.looking_for_tags = looking_for_tags\n self.ec2 = boto3.resource('ec2')", "def _get_aws_s3_connection(cls, access_key, secret_access_key):\n return boto.connect_s3(access_key, secret_access_key)", "def assume_role():\n try:\n return boto3.client('sts')\n except Exception as error:\n logger.info(\"Creating a boto 
client failed with the following error : {}\".format(error))", "def __init__(__self__,\n resource_name: str,\n args: ObjectStorageKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def get_client():\n global _CLIENT\n if _CLIENT is None:\n from stsci_aws_utils.s3 import ConcurrentS3Client\n _CLIENT = ConcurrentS3Client()\n atexit.register(_CLIENT.close)\n return _CLIENT", "def provision_create(ec2_conn, iam_conn, interana_account_id, s3_bucket_path, interana_user):\n try:\n user, all_policies = check_account_setup(iam_conn, interana_user)\n except Exception, e:\n print \"Warning could not verify user interana_user {} because {}\".format(interana_user, e)\n\n infile = 's3_bucket_list.policy.template'\n outfile = 's3_bucket_list.policy'\n\n bucket_name, bucket_prefix = get_bucket_name_prefix(s3_bucket_path)\n\n all_lines = ''\n with open(infile, 'r') as tmp_fh, open(outfile, 'w') as out_fh:\n for line in tmp_fh:\n re_proxy = re.compile('<INTERANA_ACCOUNT_ID>')\n translate = re_proxy.sub(interana_account_id, line)\n\n re_proxy = re.compile('<BUCKET_NAME>')\n translate = re_proxy.sub(bucket_name, translate)\n\n re_proxy = re.compile('<BUCKET_PREFIX>')\n translate = re_proxy.sub(bucket_prefix, translate)\n\n out_fh.write(translate)\n all_lines += translate.strip()\n\n if len(bucket_prefix) < 1:\n with open(outfile, 'r') as in_fh:\n policy = json.load(in_fh)\n del policy['Statement'][1]['Condition']\n all_lines = json.dumps(policy)\n print \"Download file to check GetObject Access {}\".format(outfile)\n with open(outfile, 'w') as out_fh:\n json.dump(policy, out_fh, indent=4)\n\n print \"****policy file {}***\".format(outfile)\n\n print json.dumps(json.loads(all_lines), indent=True)", "def get_aiobotocore_version() -> str:\n try:\n from aiobotocore import __version__ as version # type: ignore\n except ImportError:\n raise RuntimeError(\"aiobotocore is not installed\")\n return version", "def __init__(self, s3_connection, bucket_name, bucket_url):\n self.s3 = s3_connection\n self.bucket_name = bucket_name\n self.bucket_url = bucket_url", "def get_s3_bucket(env):\n s3 = boto3.resource('s3')\n bucket_name = 'govuk-%s-fastly-logs' % env\n logging.info('S3 bucket name: %s', bucket_name)\n return s3.Bucket(bucket_name)", "def create_connection(bucket_name):\n conn = boto.connect_s3()\n bucket = conn.get_bucket(bucket_name)\n return conn, bucket", "def _getStorageClient(app):\n \n if config.get(\"aws_s3_gateway\"):\n log.debug(\"_getStorageClient getting S3Client\")\n client = S3Client(app)\n else:\n log.debug(\"_getStorageClient getting MemClient\")\n client = MemClient(app)\n return client", "def mock_s3_bucket():\n with moto.mock_s3():\n bucket_name = \"mock-bucket\"\n my_config = Config(region_name=\"us-east-1\")\n s3_client = boto3.client(\"s3\", config=my_config)\n s3_client.create_bucket(Bucket=bucket_name)\n yield bucket_name", "def s3(self) -> Optional['outputs.DataRepositoryAssociationS3']:\n return pulumi.get(self, \"s3\")", "def get_backend(cls, backend=None):\n return backend if backend else aws.S3Backend(\n category=cls.default_category, bucket_name=cls.default_bucket)" ]
[ "0.767378", "0.62587637", "0.5730653", "0.5656308", "0.5650132", "0.5583414", "0.5518301", "0.5486912", "0.5486354", "0.5425678", "0.54201967", "0.541126", "0.5337484", "0.5296419", "0.5290054", "0.52891475", "0.52808595", "0.5232185", "0.52223915", "0.5181535", "0.5169401", "0.51678115", "0.513606", "0.5107055", "0.5100332", "0.5096866", "0.5081632", "0.50758284", "0.5073012", "0.50668836", "0.5061571", "0.5016826", "0.5010509", "0.499977", "0.49929443", "0.49905074", "0.4972182", "0.4912176", "0.48982242", "0.48842108", "0.4882567", "0.48716354", "0.4856164", "0.48433656", "0.48422894", "0.48235863", "0.48230156", "0.48154992", "0.4807726", "0.4802404", "0.48017326", "0.47707713", "0.47668004", "0.47625515", "0.47619182", "0.47583836", "0.47531042", "0.47431558", "0.4738317", "0.47378638", "0.4730804", "0.47275138", "0.4724893", "0.47158134", "0.47144526", "0.47107112", "0.47002608", "0.46928626", "0.46801847", "0.466224", "0.4654274", "0.4646841", "0.46464366", "0.46369153", "0.4630233", "0.46268362", "0.46215487", "0.46134368", "0.46067974", "0.46057704", "0.46000382", "0.45985174", "0.4579573", "0.4573986", "0.4570798", "0.45651367", "0.45612717", "0.45582795", "0.45573086", "0.45544958", "0.4550745", "0.45439988", "0.45434558", "0.4529864", "0.45264536", "0.452627", "0.45237264", "0.45235336", "0.45217866", "0.4521478" ]
0.81879747
0
Proxies calls to ``boto.`` methods.
def __getattr__(self, attr): # This way, we don't have to write: rv = Boto().boto.some_call # But can just write: rv = Boto().some_call # This also gives us hooks for future logging/timers/etc and # extended wrapping of things the attributes return if we so # choose. self._logger.debug('Calling wrapped boto attribute: %s on %s', attr, self) attr = getattr(self._boto, attr) if callable(attr): self._logger.debug("Boto attribute '%s' is callable", attr) @wraps(attr) def wrapper(*args, **kwargs): return attr(*args, **kwargs) return wrapper return attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monkey_patch_botocore_for_xray():\n wrapt.wrap_function_wrapper(\n \"botocore.client\", \"BaseClient._make_api_call\", xray_botocore_api_call,\n )", "def xray_botocore_api_call(wrapped, instance, args, kwargs):\n return generic_xray_wrapper(\n wrapped,\n instance,\n args,\n kwargs,\n name=get_service_name,\n namespace=\"aws\",\n metadata_extractor=extract_aws_metadata,\n error_handling_type=ERROR_HANDLING_BOTOCORE,\n )", "def mock_s3_boto_returns() -> callable:\n\n def client(aws_res, aws_access_key_id=None, aws_secret_access_key=None):\n return BotoMockReturns()\n\n return client", "def stubbed_s3_client():\n stubber = stub.Stubber(app.S3_CLIENT)\n\n yield stubber\n\n stubber.deactivate()", "def proxy_method(self, rest_path, sign, kwargs):", "def get_boto(args=None, logger=None, stats=None):\n return Boto(**__get_arguments(args, logger, stats))", "def _imp_proxy_method(self, name, func):\n pass", "def aws():\n pass", "def aws(ctx): # pylint: disable=unused-argument\n pass # pylint: disable=unnecessary-pass", "def mock_s3_client():\n with mock_s3():\n yield", "def test_invoke_default_processor(mock_boto3_client, mock_boto3_resource):\n mock_boto3_client.return_value.head_object.return_value = {\n \"AcceptRanges\": \"bytes\",\n \"ContentLength\": 4372,\n \"ContentType\": \"binary/octet-stream\",\n \"ETag\": \"287e8177e8ffbc9ca08fb2afabd237ba\",\n \"LastModified\": \"Wed, 21 Nov 2018 20:05:15 GMT\",\n \"Metadata\": {\n \"s3_object_name_raw_tag\": \"s3://mock-datalake-bucket/dummy/dummy-0.txt\"\n },\n \"ResponseMetadata\": {\n \"HTTPHeaders\": {\n \"accept-ranges\": \"bytes\",\n \"content-length\": \"4372\",\n \"content-type\": \"binary/octet-stream\",\n \"date\": \"Wed, 21 Nov 2018 20:05:19 GMT\",\n \"etag\": \"287e8177e8ffbc9ca08fb2afabd237ba\",\n \"last-modified\": \"Wed, 21 Nov 2018 20:05:15 GMT\",\n \"server\": \"AmazonS3\",\n \"x-amz-id-2\": \"Fe8soQAbr2qCBMt04hu4cMvD59ugsNYRrVHlpzgFn4tV8DbQKzYStMXAvWKmgi5/+ttJjIk07e0=\",\n \"x-amz-meta-s3_object_name_raw_tag\": \"s3://mock-datalake-bucket/dummy/dummy-0.txt\",\n \"x-amz-request-id\": \"F1BE1D8588C74F8F\",\n \"x-amz-version-id\": \"X2YgsAhqxyZh9_6aTOUJC0B1.BGKX6iN\"\n },\n \"HTTPStatusCode\": 200,\n \"HostId\": \"Fe8soQAbr2qCBMt04hu4cMvD59ugsNYRrVHlpzgFn4tV8DbQKzYStMXAvWKmgi5/+ttJjIk07e0=\",\n \"RequestId\": \"F1BE1D8588C74F8F\",\n \"RetryAttempts\": 0\n },\n \"VersionId\": \"X2YgsAhqxyZh9_6aTOUJC0B1.BGKX6iN\"\n }\n from odl_datalake_ingestion import lambda_handler\n mock_context = MockContext()\n mock_event[\"Records\"][0][\"s3\"][\"object\"][\"key\"] = \"servicedesk/customer/ca_sdm/tb_call_req/latest/call_req.csv\"\n\n lambda_handler(mock_event, mock_context)", "def __call__(self, *args, **kwargs):\n return self.method(*args, **kwargs)", "def mock_s3_boto_raise() -> callable:\n\n def client(aws_res, aws_access_key_id=None, aws_secret_access_key=None):\n return BotoMockRaise()\n\n return client", "def __virtual__():\n # the boto_ec2 execution module relies on the connect_to_region() method\n # which was added in boto 2.8.0\n # https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12\n has_boto_reqs = salt.utils.versions.check_boto_reqs(\n boto_ver=\"2.8.0\", check_boto3=False\n )\n if has_boto_reqs is True:\n __utils__[\"boto.assign_funcs\"](__name__, \"ec2\", pack=__salt__)\n return has_boto_reqs", "def _imp_proxy_method(self, name, func):\n # We just return the original function\n return func", "def monkey_path_api_requests(self, method, mock_object=None):\n attr = getattr(api.requests, method)\n mock_method 
= mock.MagicMock()\n setattr(api.requests, method, mock_method)\n try:\n yield mock_method\n finally:\n setattr(api.requests, method, attr)", "def _mexe(self, request, sender=None, override_num_retries=None):\r\n boto.log.debug('Method: %s' % request.method)\r\n boto.log.debug('Path: %s' % request.path)\r\n boto.log.debug('Data: %s' % request.body)\r\n boto.log.debug('Headers: %s' % request.headers)\r\n boto.log.debug('Host: %s' % request.host)\r\n response = None\r\n body = None\r\n e = None\r\n if override_num_retries is None:\r\n num_retries = config.getint('Boto', 'num_retries', self.num_retries)\r\n else:\r\n num_retries = override_num_retries\r\n i = 0\r\n connection = self.get_http_connection(request.host, self.is_secure)\r\n while i <= num_retries:\r\n # Use binary exponential backoff to desynchronize client requests\r\n next_sleep = random.random() * (2 ** i)\r\n try:\r\n # we now re-sign each request before it is retried\r\n request.authorize(connection=self)\r\n if callable(sender):\r\n response = sender(connection, request.method, request.path,\r\n request.body, request.headers)\r\n else:\r\n connection.request(request.method, request.path, request.body,\r\n request.headers)\r\n response = connection.getresponse()\r\n location = response.getheader('location')\r\n # -- gross hack --\r\n # httplib gets confused with chunked responses to HEAD requests\r\n # so I have to fake it out\r\n if request.method == 'HEAD' and getattr(response, 'chunked', False):\r\n response.chunked = 0\r\n if response.status == 500 or response.status == 503:\r\n boto.log.debug('received %d response, retrying in %3.1f seconds' %\r\n (response.status, next_sleep))\r\n body = response.read()\r\n elif response.status < 300 or response.status >= 400 or \\\r\n not location:\r\n self.put_http_connection(request.host, self.is_secure, connection)\r\n return response\r\n else:\r\n scheme, request.host, request.path, params, query, fragment = \\\r\n urlparse.urlparse(location)\r\n if query:\r\n request.path += '?' + query\r\n boto.log.debug('Redirecting: %s' % scheme + '://' + request.host + request.path)\r\n connection = self.get_http_connection(request.host, scheme == 'https')\r\n continue\r\n except self.http_exceptions, e:\r\n for unretryable in self.http_unretryable_exceptions:\r\n if isinstance(e, unretryable):\r\n boto.log.debug(\r\n 'encountered unretryable %s exception, re-raising' %\r\n e.__class__.__name__)\r\n raise e\r\n boto.log.debug('encountered %s exception, reconnecting' % \\\r\n e.__class__.__name__)\r\n connection = self.new_http_connection(request.host, self.is_secure)\r\n time.sleep(next_sleep)\r\n i += 1\r\n # If we made it here, it's because we have exhausted our retries and stil haven't\r\n # succeeded. 
So, if we have a response object, use it to raise an exception.\r\n # Otherwise, raise the exception that must have already happened.\r\n if response:\r\n raise BotoServerError(response.status, response.reason, body)\r\n elif e:\r\n raise e\r\n else:\r\n raise BotoClientError('Please report this exception as a Boto Issue!')", "def authorizedClientCall(self):\n\t\tclient \t\t= boto3.client(\"s3\")\n\t\treturn(client)", "def __call__(self, *args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:\n return self._target(*args, **kwargs)", "def __getattr__(self, method: str):\n @exception_handler\n def func(*args, **kwargs):\n return self._client.PyCall(method, list(args), kwargs,\n self._wait_for_ready, self._call_timeout,\n self._compress)\n\n setattr(self, method, func)\n return func", "def __proxyWrapper( self, name, args, kwargs ):\n res = self.__prepareSecurityDetails()\n if not res['OK']:\n return res\n try:\n fileCatalog = FileCatalog( ['LcgFileCatalogCombined'] )\n method = getattr( fileCatalog, name )\n except AttributeError, error:\n errStr = \"LcgFileCatalogProxyHandler.__proxyWrapper: No method named %s\" % name\n gLogger.exception( errStr, name, error )\n return S_ERROR( errStr )\n try:\n result = method( *args, **kwargs )\n return result\n except Exception, error:\n errStr = \"LcgFileCatalogProxyHandler.__proxyWrapper: Exception while performing %s\" % name\n gLogger.exception( errStr, name, error )\n return S_ERROR( errStr )", "def __getattr__(self, name):\n def wrapper(*args, **kwargs): \n if name in (\"get\", \"post\", \"put\", \"patch\", \"delete\", \"head\", \"options\"): #These are HTTP methods\n is_http_method = True\n \n #Now proxy down to the requests object (or session) \n try:\n output = getattr(self.session, name)(*args, **kwargs) #Assume a function\n except TypeError: #Assume a property\n output = getattr(self.session, name)\n \n #Now catch any special HTTP calls \n if is_http_method:\n #Examine the status, if in error, raise it\n if output.status_code >= 400: #Some error has occurred!\n raise DownloadingError(\"Unable to download from resource: '%s'\\n%s\" % (output.url, output.content))\n return output\n return wrapper #This wrapper partial allows us to pass *args and **kwargs into a __getattr__ situation", "def __getattr__(self, function_name):\r\n if function_name in ZooKeeper._ZK_SYNC_METHODS:\r\n return self._wrap_sync(function_name)\r\n elif function_name in ZooKeeper._ZK_ASYNC_METHODS:\r\n return self._wrap_async(function_name)\r\n else:\r\n raise AttributeError('%r has no attribute %r' % (self, function_name))", "def test_aws_service_api_interfaces_get(self):\n pass", "def _mexe(self, request, override_num_retries=1,\n retry_handler=None):\n log.debug('Method: %s' % request.method)\n log.debug('Url: %s' % request.url)\n log.debug('Data: %s' % request.body)\n log.debug('Headers: %s' % request.headers)\n returnValue = None\n response = None\n body = None\n ex = None\n if override_num_retries is None:\n num_retries = config.getint('TxBoto', 'num_retries', self.num_retries)\n else:\n num_retries = override_num_retries\n i = 0\n while i <= num_retries:\n # Use binary exponential backoff to desynchronize client requests.\n next_sleep = min(random.random() * (2 ** i),\n config.get('TxBoto', 'max_retry_delay', 60))\n try:\n request.authorize(connection=self)\n log.debug('Final headers: %s' % request.headers)\n request.start_time = datetime.now()\n\n response = yield self.send_request(request)\n response_body = yield response.content()\n response.reason = 
code2status(response.code, 'N/A')\n log.debug('Response headers: %s' % response.headers)\n location = response.headers.getRawHeaders('location')\n if location:\n location = location[0]\n if callable(retry_handler):\n status = yield defer.maybeDeferred(retry_handler, response,\n response_body, i,\n next_sleep)\n if status:\n msg, i, next_sleep = status\n if msg:\n log.debug(msg)\n time.sleep(next_sleep)\n continue\n if response.code in [500, 502, 503, 504]:\n msg = 'Received %d response. ' % response.code\n msg += 'Retrying in %3.1f seconds' % next_sleep\n log.debug(msg)\n body = response_body\n if isinstance(body, bytes):\n body = body.decode('utf-8')\n elif response.code < 300 or response.code >= 400 or \\\n not location:\n # don't return connection to the pool if response contains\n # Connection:close header, because the connection has been\n # closed and default reconnect behavior may do something\n # different than new_http_connection. Also, it's probably\n # less efficient to try to reuse a closed connection.\n if self.request_hook is not None:\n yield defer.maybeDeferred(\n self.request_hook.handle_request_data,\n request, response)\n returnValue = (response, response_body,)\n break\n except PleaseRetryException as e:\n log.debug('encountered a retry exception: {}'.foramt(e))\n response = e.response\n ex = e\n except self.http_exceptions as e:\n if isinstance(e, self.http_unretryable_exceptions):\n log.debug('encountered unretryable {} exception, re-raising'\n .format(e.__class__.__name__))\n raise\n log.debug('encountered {} exception, reconnecting'\n .format(e.__class__.__name__))\n ex = e\n time.sleep(next_sleep)\n i += 1\n\n if isinstance(returnValue, tuple):\n defer.returnValue(returnValue)\n # If we made it here, it's because we have exhausted our retries\n # and stil haven't succeeded. 
So, if we have a response object,\n # use it to raise an exception.\n # Otherwise, raise the exception that must have already happened.\n if self.request_hook is not None:\n yield defer.maybeDeferred(self.request_hook.handle_request_data,\n request, response, error=True)\n if response:\n raise BotoServerError(response.status, response.reason, body)\n elif ex:\n raise ex\n else:\n msg = 'Please report this exception as a TxBoto Issue!'\n raise BotoClientError(msg)", "def test_aws_service_api_image_get(self):\n pass", "def test_aws_service_api_keypairs_get(self):\n pass", "def get_boto3(args=None, logger=None, stats=None):\n return Boto3(**__get_arguments(args, logger, stats))", "def _wrapped_method(self, _meth_name, *args, **kwargs):\n return self._delegate(_meth_name, *args, **kwargs)", "def __init__(self):\n self.aws = AWS()", "def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None,\n host=None, debug=0, https_connection_factory=None,\n calling_format=None, path='/', provider='aws',\n bucket_class=None, mock_s3_fs=None):\n # use mock_s3_fs even if it's {}\n self.mock_s3_fs = combine_values({}, mock_s3_fs)\n self.endpoint = host or 's3.amazonaws.com'", "def test_aws_service_api_public_images_get(self):\n pass", "def _call_method(self, module, method, *args, **kwargs):\n return self.invoke_api(module, method, *args, **kwargs)", "def __call__(self, *args, **kwargs):\n if self.secure and not self.avatar.authenticated:\n logger.error('Attempted access to secured method before authentication: %s - %s', (self.avatar.name, self.function))\n return\n\n return self.function(self.avatar.name, *args, **kwargs)", "def boto3_client(service='ec2'):\n\n def wrapper(func):\n @wraps(func)\n def wrapped_func(user, *args, **kwargs):\n region = request.args.get('region') or 'eu-west-1'\n client = get_client(user['access_key'], user['secret_key'],\n region=region, service=service)\n return func(user, client, *args, **kwargs)\n\n return wrapped_func\n\n return wrapper", "def __call__(self, *args, **kwargs):\n return self.call(*args, **kwargs)", "def _setup_aws_clients(self) -> None:", "def __init__(self, temboo_session):\n super(PutBucketWebsiteRedirect, self).__init__(temboo_session, '/Library/Amazon/S3/PutBucketWebsiteRedirect')", "def call(self, method, name, params=None, payload=None, **kwds):", "def test_invoke_dummy_plugin(mock_boto3_client, mock_boto3_resource):\n from odl_datalake_ingestion import lambda_handler\n mock_context = MockContext()\n mock_event[\"Records\"][0][\"s3\"][\"object\"][\"key\"] = \"dummy/dummy00.txt\"\n lambda_handler(mock_event, mock_context)", "def test_aws_service_api_private_images_get(self):\n pass", "def __init__(self, service, acces_key, secret_key):\n \n self.client = boto3.client(\n service,\n aws_access_key_id=acces_key,\n aws_secret_access_key=secret_key,\n )", "def __getattribute__(self, name):\n attr = object.__getattribute__(self, name)\n if hasattr(attr, '__call__'):\n def newfunc(*args, **kwargs):\n # Allow async calls to methods (promises)\n if 'async' in kwargs: del kwargs['async']\n obj = Pyro4.Proxy(self.name)\n print \"---Proxy {}\".format(obj)\n return obj.perform()\n # result = func(*args, **kwargs)\n # return result\n return newfunc\n else:\n return attr", "def __init__(self):\n self.s3_resource = boto3.resource('s3')\n self.s3_client = boto3.client('s3')", "def test_invoke_skip_plugin(mock_boto3_client, mock_boto3_resource):\n from 
odl_datalake_ingestion import lambda_handler\n mock_context = MockContext()\n mock_event[\"Records\"][0][\"s3\"][\"object\"][\"key\"] = \"servicedesk/customer/ca_sdm/tb_call_req/2018-07-02/call_req.csv\"\n mock_boto3_client.return_value.head_object.return_value = {\n \"ResponseMetadata\": {\n \"HTTPHeaders\": {\n \"content-length\": 1024,\n \"content-type\": \"text/plain\",\n \"last-modified\": \"Sun, 1 Jan 2006 12:00:00 GMT\"\n }\n }\n }\n lambda_handler(mock_event, mock_context)", "def boto3_stubber(mocker, boto3_stubber_path):\n __tracebackhide__ = True\n created_stubbers = []\n mocked_clients = {}\n\n mocked_client_factory = mocker.patch(boto3_stubber_path, autospec=True)\n # use **kwargs to skip parameters passed to the boto3.client other than the \"service\"\n # e.g. boto3.client(\"ec2\", region_name=region, ...) --> x = ec2\n mocked_client_factory.client.side_effect = lambda x, **kwargs: mocked_clients[x]\n\n def _boto3_stubber(service, mocked_requests):\n if \"AWS_DEFAULT_REGION\" not in os.environ:\n # We need to provide a region to boto3 to avoid no region exception.\n # Which region to provide is arbitrary.\n os.environ[\"AWS_DEFAULT_REGION\"] = \"us-east-1\"\n client = boto3.client(service)\n stubber = Stubber(client)\n # Save a ref to the stubber so that we can deactivate it at the end of the test.\n created_stubbers.append(stubber)\n\n # Attach mocked requests to the Stubber and activate it.\n if not isinstance(mocked_requests, list):\n mocked_requests = [mocked_requests]\n for mocked_request in mocked_requests:\n if mocked_request.generate_error:\n stubber.add_client_error(\n mocked_request.method,\n service_message=mocked_request.response,\n expected_params=mocked_request.expected_params,\n service_error_code=mocked_request.error_code,\n )\n else:\n stubber.add_response(\n mocked_request.method, mocked_request.response, expected_params=mocked_request.expected_params\n )\n stubber.activate()\n\n # Add stubber to the collection of mocked clients. 
This allows to mock multiple clients.\n # Mocking twice the same client will replace the previous one.\n mocked_clients[service] = client\n return client\n\n # yield allows to return the value and then continue the execution when the test is over.\n # Used for resources cleanup.\n yield _boto3_stubber\n\n # Assert that all mocked requests were consumed and deactivate all stubbers.\n for stubber in created_stubbers:\n stubber.assert_no_pending_responses()\n stubber.deactivate()", "def __call__(self, method, url, *args, **kwargs):\n log.debug('{} {}'.format(method.upper(), url))\n if 'params' in kwargs:\n kwargs['query'] = kwargs.pop('params')\n return getattr(self.client, method)(url, *args, **kwargs).json", "def __getattr__(self, name):\n if name in self.propertysynonyms: # Reset real name if argument provided in lower or camel case\n name = self.propertysynonyms[name]\n if self.serviceimplementation == 'basic':\n if name in ('serviceproperties', 'localProperties', 'internal_attributes', 'propertysynonyms',\n 'forceGetProperty'):\n pass\n elif name in self.serviceproperties:\n if self.forceGetProperty is False and self.serviceproperties[name] is False: # False = read-only\n if name in self.__dict__:\n return self.__dict__[name]\n else:\n # Get Property from Basic and store it\n prop = self.GetProperty(name)\n self.__dict__[name] = prop\n return prop\n else: # Get Property from Basic and do not store it\n return self.GetProperty(name)\n # Execute the usual attributes getter\n return super(SFServices, self).__getattribute__(name)", "def __getattr__(self, method_name):\n return partial(self.exec, method_name.replace(\"_\", \" \"))", "def __call(self, **kwargs):\n return self.__call_api(kwargs)", "def __call__(self, *args, **kwargs):\n return self.__wrapped__(*args, **kwargs)", "def _aws_get_object(bucket, key, request_pays=True, client=None):\n if not client:\n session = boto3_session(region_name=REGION)\n client = session.client(\"s3\")\n\n params = {\"Bucket\": bucket, \"Key\": key}\n if request_pays:\n params[\"RequestPayer\"] = \"requester\"\n response = client.get_object(**params)\n return response[\"Body\"].read()", "def unAuthorizedClientCall(self):\n\t\tclient \t\t= boto3.client(\"s3\", config=botocore.config.Config(signature_version=botocore.UNSIGNED))\n\t\treturn(client)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_aws_service_api_snapshots_get(self):\n pass", "def _inner_getattr(self, endpoint, **parameters):\n request = self._build_request(endpoint, **parameters)\n\n return self.requestor.request(**request)", "async def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _B:\n await self._timeToRun()\n return await self._original_method(self._original_self, *args, **kwargs)", "def call(self, **kwargs):\n return getattr(self.resource, self.function)(**kwargs)", "def wrapper(fn):\n if name is None:\n name_ = fn.__name__\n else:\n name_ = name\n original_method = getattr(cls,name_,default)\n new_method = fn(original_method)\n setattr(cls,name_,new_method)\n return fn", "def __set_amazon_s3_service__(self, access_key, secret_key):\n self.s3_conn = S3Connection(access_key, secret_key)", "def __call__(self, *args, **kwargs):\n return self.call(*args, **kwargs)", "def __get__(self, _instance, _inst_cls):\n return getattr(self._call_proxy_inst, self._magic_name)", "def test_aws_service_api_private_image_get(self):\n pass", "def __getattr__(self, name):\n return getattr(self._client, name)", "def export_callProxyMethod( self, name, args, kargs ):\n res = 
pythonCall( 120, self.__proxyWrapper, name, args, kargs )\n if res['OK']:\n return res['Value']\n else:\n return res", "def __get__(self, instance, owner):\n return self.method(owner)", "def __getattr__(self, name):\n return functools.partial(self._obj.request, self._api_prefix + name)", "def __call__(self, *args, **kwargs): # real signature unknown\n pass", "def s3_request(func):\n\n\t@wraps (func)\n\tdef wrapper(url, *args, **kwargs):\n\t\ttry:\n\t\t\treturn func (url, *args, **kwargs)\n\t\texcept ClientError as exc:\n\t\t\tif int (exc.response[\"Error\"][\"Code\"]) == 404:\n\t\t\t\traise EnvironmentError (\"file {} not found\".format (url))\n\t\t\telse:\n\t\t\t\traise\n\n\treturn wrapper", "def get_boto_client(self) -> S3Client:\n if self._boto_client is None:\n config = Config(signature_version=botocore.UNSIGNED)\n self._boto_client = self.session.client(\n \"s3\",\n region_name=settings.S3_REGION,\n endpoint_url=settings.S3_ENDPOINT_URL,\n config=config,\n )\n return self._boto_client", "def __call__(self, **action_kwargs):\n\n return SOAP.send(self.service, self, **action_kwargs)", "def call(self, *args, **kwargs):", "def hook(cls, cov, method_name):\n method = getattr(cov, method_name)\n hook = cls(method)\n setattr(cov, method_name, hook.wrapper)\n return hook", "def image_proxy(img):\n\n def _set(*args):\n __pragma__(\"noalias\", \"set\")\n value = img.set(*args)\n __pragma__(\"alias\", \"set\", \"py_set\")\n return value\n\n def _get(*args):\n __pragma__(\"noalias\", \"get\")\n value = img.get(*args)\n __pragma__(\"alias\", \"get\", \"py_get\")\n return value\n\n img.set = _set\n img.get = _get\n return img", "def _iter_call_meth(self, method, *args, **kwargs):\n for obj in self:\n if hasattr(obj, method):\n f = op.methodcaller(method, *args, **kwargs)\n f(obj)", "def s3_client(s3_url):\n with moto.mock_s3():\n with clients.S3Client(s3_url) as client:\n yield client", "def __paginate_call(client, method, output_key, params=None):\n def is_response_success(response):\n return response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n params = dict() if params is None else params\n params['PaginationConfig'] = dict(PageSize=AWS_PAGE_SIZE)\n\n paginator = client.get_paginator(method)\n responses = list(paginator.paginate(**params))\n\n if not all([is_response_success(r) for r in responses]):\n raise Exception('Error during execution of method {method}'.format(method=method))\n\n responses = [r[output_key] for r in responses]\n return reduce(lambda x, y: x + y, responses)", "def apply_method(self, r, **attr):\n\n resource = self.resource\n rules = resource.get_config(\"anonymize\")\n if not rules:\n r.error(405, \"Anonymizing not configured for resource\")\n\n record_ids = current.session.s3.get(\"anonymize_record_ids\")\n if not record_ids:\n r.error(400, \"No target record(s) specified\")\n\n table = resource.table\n\n # Check permission for each record\n has_permission = current.auth.s3_has_permission\n for record_id in record_ids:\n if not has_permission(\"update\", table, record_id=record_id) or \\\n not has_permission(\"delete\", table, record_id=record_id):\n r.unauthorised()\n\n output = {}\n\n if r.representation == \"html\":\n if r.http == \"GET\":\n # Show form\n anonymise_btn = S3AnonymizeBulkWidget.widget(r,\n record_ids = record_ids,\n _class = \"action-btn anonymize-btn\",\n )\n current.response.view = \"simple.html\"\n output = {\"item\": anonymise_btn,\n \"title\": current.T(\"Anonymize Records\"),\n }\n elif r.http == \"POST\":\n # Process form\n output = 
self.anonymize(r, table, record_ids)\n del current.session.s3[\"anonymize_record_ids\"]\n next_url = resource.get_config(\"anonymize_next\")\n if next_url:\n redirect(next_url)\n else:\n r.error(405, current.ERROR.BAD_METHOD)\n else:\n r.error(415, current.ERROR.BAD_FORMAT)\n\n return output", "def mock_s3_fixture():\n with mock_s3():\n yield", "def __call__(self, *args, **kwargs):\n return self._func(*args, **kwargs)", "def callmethod(\n self, method: str, *args: Sequence[Any], **kwargs: Sequence[Any]\n ) -> List[Any]:\n return getattr(self, method)(*args, **kwargs)", "def map_method(self, method_name, *args, **kwds):\r\n return self.map(self._call_extension_method,\r\n method_name, *args, **kwds)", "def __init__(self, name: str, args: S3Args, opts: ResourceOptions = None):\n super().__init__(\"custom:resource:S3\", name, {}, opts)\n \"\"\"Override ComponentResource class constructor\"\"\"\n\n self.bucket_final = Output.all(\n args.project_name,\n args.bucket_name\n ).apply(\n lambda arg: f\"{arg[0]}-{arg[1]}\"\n )\n\n self.bucket = aws.s3.Bucket(\n args.bucket_name,\n bucket=self.bucket_final,\n acl=\"private\",\n tags={\n \"BillingCode\": args.billing_code,\n \"Name\": self.bucket_final,\n \"Project\": args.project_name,\n },\n server_side_encryption_configuration={\n \"rule\": {\n \"applyServerSideEncryptionByDefault\": {\n \"sseAlgorithm\": \"AES256\",\n },\n },\n },\n opts=ResourceOptions(parent=self)\n )\n\n self.deny_vpce_policy = Output.all(\n args.ec2_role_arn,\n self.bucket.arn,\n args.vpc_endpoint_id\n ).apply(\n lambda args:\n aws.iam.get_policy_document(\n version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Access-to-specific-VPCE-only\",\n principals=[\n aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n identifiers=[args[0]],\n type=\"AWS\",\n )\n ],\n actions=[\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:ListBucket\",\n \"s3:PutObject\",\n \"s3:RestoreObject\",\n ],\n effect=\"Deny\",\n resources=[\n args[1],\n args[1]+\"/*\"\n ],\n conditions=[\n aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"StringNotEquals\",\n values=[args[2]],\n variable=\"aws:sourceVpce\",\n )\n ],\n )\n ],\n opts=ResourceOptions(parent=self.bucket)\n )\n )\n\n admin_principals = []\n for admin in args.admin_list:\n admin_principals.append(f\"arn:aws:iam::{current_id}:user/{admin}\")\n\n self.admin_access_policy = Output.all(self.bucket.arn).apply(\n lambda args:\n aws.iam.get_policy_document(\n version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"admin-access\",\n principals=[\n aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n identifiers=admin_principals,\n type=\"AWS\",\n )\n ],\n actions=[\"s3:*\"],\n effect=\"Allow\",\n resources=[\n args[0],\n args[0]+\"/*\"\n ],\n )\n ],\n opts=ResourceOptions(parent=self.bucket)\n )\n )\n\n self.policy = aws.s3.BucketPolicy(\n f'{args.bucket_name}-policy',\n bucket=self.bucket.id,\n policy=aws.iam.get_policy_document(\n source_json=self.deny_vpce_policy.json,\n override_json=self.admin_access_policy.json,\n ).json,\n opts=ResourceOptions(parent=self.bucket)\n )\n\n self.register_outputs({})", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n try:\n response = s3.get_object(Bucket=BUCKET, Key=KEY)\n print('CONTENT TYPE:', response['ContentType'])\n print('response:')\n 
pprint.pprint(response)\n print('event')\n pprint.pprint(event)\n print('payload')\n pprint.pprint(event.get('payload'))\n # return json.loads(json.dumps(response, default=str))\n # defined by https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return {\n 'statusCode': 200,\n 'isBase64Encoded': False,\n 'body': json.dumps(response, default=str)\n }\n # return response['ContentType']\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(KEY, BUCKET))\n raise e", "def __call__(self, *events):\n if self._func_is_method and self._ob is not None:\n return self._func(self._ob(), *events)\n else:\n return self._func(*events)", "def s3_request(func):\n\n @wraps(func)\n def wrapper(url, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise EnvironmentError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper", "def s3_request(func):\n\n @wraps(func)\n def wrapper(url, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise EnvironmentError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper", "def test_aws_service_api_vm_patch(self):\n pass", "def tranquilize(method='get', requires_authentication=None):\n\n #just to be safe\n method = method.lower()\n\n def _dart(f):\n f._spec = _prepare(f)\n f._method = method\n f._methods = None\n f._requires_authentication = requires_authentication\n return f\n\n return _dart", "def test_aws_service_api_keypair_get(self):\n pass", "def call(\n self,\n method,\n path,\n params=None,\n headers=None,\n files=None,\n url_override=None,\n api_version=None,\n ):\n if self._should_restore_default_page_size(params):\n params.update(**{\"limit\": self.default_page_size})\n response = super().call(method, path, params, headers, files, url_override, api_version)\n self._update_insights_throttle_limit(response)\n self._handle_call_rate_limit(response, params)\n return response", "def ServiceMethod(fn):\n\n fn.IsServiceMethod = True\n return fn", "def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)", "def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)", "def __call__(self, *args, **kwargs):\n\n return self.__class__(self._obj, *args, **kwargs)", "def __call__(self, method):\n self._method = method\n\n async def wrapper(*args, **kwargs):\n \"\"\"Wrap the method.\"\"\"\n if self.name is None:\n self.name = str(self._method.__qualname__).lower().replace(\".\", \"_\")\n try:\n self._coresys = args[0].coresys\n except AttributeError:\n return False\n\n if not self._coresys:\n raise JobException(f\"coresys is missing on {self.name}\")\n\n job = self._coresys.jobs.get_job(self.name)\n\n if self.conditions and not self._check_conditions():\n return False\n\n try:\n return await self._method(*args, **kwargs)\n except HassioError as err:\n raise err\n except Exception as err:\n _LOGGER.exception(\"Unhandled exception: %s\", err)\n sentry_sdk.capture_exception(err)\n raise JobException() from err\n finally:\n if self.cleanup:\n self._coresys.jobs.remove_job(job)\n\n return wrapper", "def decorate_HTTP_verb_method(method):\n @functools.wraps(method)\n def wrapper(self, RIC_base_uri, **kwargs):\n partition = 
kwargs.pop('partition', '')\n name = kwargs.pop('name', '')\n sub_path = kwargs.pop('subPath', '')\n suffix = kwargs.pop('suffix', '')\n uri_as_parts = kwargs.pop('uri_as_parts', False)\n if uri_as_parts:\n REST_uri = generate_bigip_uri(RIC_base_uri, partition, name,\n sub_path, suffix, **kwargs)\n else:\n REST_uri = RIC_base_uri\n pre_message = \"%s WITH uri: %s AND suffix: %s AND kwargs: %s\" %\\\n (method.__name__, REST_uri, suffix, kwargs)\n logging.debug(pre_message)\n response = method(self, REST_uri, **kwargs)\n post_message =\\\n \"RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:\"\\\n \" %s\\nText: %r\" % (response.status_code,\n response.headers.get('Content-Type', None),\n response.headers.get('Content-Encoding', None),\n response.text)\n logging.debug(post_message)\n if response.status_code not in range(200, 207):\n error_message = '%s Unexpected Error: %s for uri: %s\\nText: %r' %\\\n (response.status_code,\n response.reason,\n response.url,\n response.text)\n raise iControlUnexpectedHTTPError(error_message, response=response)\n return response\n return wrapper", "def test_aws_service_api_vm_get(self):\n pass", "def mock_amazon():\n amazon = Amazon()\n amazon.carrot1 = 'cenoura normal'\n amazon.carrot2 = 'cenoura radioativa'\n amazon.carrot_number = 575\n return amazon", "def __getattr__(self, method):\n def run_callback(func, plus, result):\n \"\"\"Execute the given callback safely.\n Get data and/or error from result and call func passing it\n data, plus (if needed) and error. Catch, log and suppress\n all exceptions.\n func (function): the callback to invoke.\n plus (object): optional additional data.\n result (AsyncResult): the result of a (finished) RPC call.\n \"\"\"\n data = result.value\n error = None if result.successful() else \"%s\" % result.exception\n try:\n if plus is None:\n func(data, error=error)\n else:\n func(data, plus, error=error)\n except Exception as error:\n logger.error(\"RPC callback for %s.%s raised exception.\",\n self.remote_service_coord.name, method,\n exc_info=True)\n\n def remote_method(**data):\n \"\"\"Forward arguments to execute_rpc.\n \"\"\"\n callback = data.pop(\"callback\", None)\n plus = data.pop(\"plus\", None)\n result = self.execute_rpc(method=method, data=data)\n if callback is not None:\n callback = functools.partial(run_callback, callback, plus)\n result.rawlink(functools.partial(gevent.spawn, callback))\n return result\n\n return remote_method" ]
[ "0.6867628", "0.6424001", "0.55805993", "0.5508542", "0.5488377", "0.5417456", "0.53769153", "0.534277", "0.5236493", "0.5206002", "0.5194725", "0.51764464", "0.5175369", "0.51672405", "0.5091889", "0.5048222", "0.50451034", "0.5020509", "0.5010022", "0.49961105", "0.49813974", "0.497495", "0.49507272", "0.4944728", "0.4880305", "0.48793724", "0.48755848", "0.48643434", "0.4854807", "0.48447433", "0.48442733", "0.4822769", "0.47975942", "0.47784844", "0.47649994", "0.47555003", "0.47518817", "0.4751196", "0.4746962", "0.47456482", "0.47441098", "0.4740985", "0.47397283", "0.47276604", "0.4721331", "0.4719694", "0.47148743", "0.4710874", "0.4704307", "0.47035956", "0.46868995", "0.46592343", "0.46499106", "0.46457848", "0.46294546", "0.4618945", "0.46138218", "0.46078742", "0.4604254", "0.45999068", "0.4596586", "0.4586505", "0.4586394", "0.4583155", "0.45830157", "0.45728898", "0.45718732", "0.455921", "0.4548159", "0.45466152", "0.45461008", "0.45441145", "0.45399725", "0.4539744", "0.45342577", "0.45336574", "0.45335382", "0.45309645", "0.4528861", "0.45274556", "0.4517319", "0.4514349", "0.45108208", "0.45018792", "0.45013765", "0.44945487", "0.44945487", "0.4494418", "0.4491223", "0.44855618", "0.4483454", "0.4481721", "0.4481325", "0.4481325", "0.4471434", "0.44712332", "0.44702312", "0.44676432", "0.44671285", "0.44643906" ]
0.6325436
2
Gets all AWS regions that Krux can access
def get_valid_regions(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] for region in resp.get('Regions', [])]", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_valid_regions(self):\n conn = self._boto.ec2.connect_to_region(self.cli_region)\n\n regions = []\n for region in conn.get_all_regions():\n if getattr(RegionCode.Region, region.name, None) is not None:\n regions.append(RegionCode.Region[region.name])\n else:\n regions.append(region.name)\n\n return regions", "def get_valid_regions(self):\n client = self._boto.client('ec2')\n\n regions = []\n for region in client.describe_regions().get('Regions', []):\n if getattr(RegionCode.Region, region.get('RegionName'), None) is not None:\n regions.append(RegionCode.Region[region.get('RegionName')])\n else:\n regions.append(region.get('RegionName'))\n\n return regions", "def regions(self) -> Sequence[str]:\n return pulumi.get(self, \"regions\")", "def test_aws_service_api_regions_get(self):\n pass", "def get_regions(self):\n return self._regions", "def regions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"regions\")", "def regions(self, member_state):\n rates = self._get_rates(member_state)\n return list(rates.regions.keys())", "def regions(self):\n return self._regions", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def get_regions(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_regions\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def filter_regions(self):\n return self.filter_nodes('/DistrictBuilder/Regions/Region')", "def list_regions(self, **kwargs):\n resource_path = \"/regions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_regions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC 
Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def get_regions(self,online=False):\n clients = HWIOS.pb_server.get_clients()\n regions = []\n for client in clients:\n for service in client.region_services:\n if online: \n if service['status'] == 'ON':\n for region in service['regions']:\n regions.append(region)\n else:\n for region in service['regions']:\n region['status'] = service['status']\n regions.append(region)\n return regions", "def regions(self) -> Optional[Sequence['outputs.GetTrafficPolicyDocumentRuleRegionResult']]:\n return pulumi.get(self, \"regions\")", "def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"regions\")", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def region_clients(self, **kwargs):\n return stats.region_clients(self._host, self._session, **kwargs)", "def getStudyRegions(self):\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n self.cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in self.cursor:\n exclusionRows.append(state[0])\n query = 'SELECT * FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n studyRegions = df[~df['name'].isin(exclusionRows)]['name']\n studyRegions = studyRegions.reset_index()\n studyRegions = studyRegions.drop('index', axis=1)\n self.studyRegions = studyRegions\n return studyRegions", "def get_regions(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self.R", "def list_regions():\n regions_areas = (\n db.session.query(\n models.Region.code.label(\"region_code\"),\n models.Region.name.label(\"region_name\"),\n db.case([(models.District.code.is_(None),\n db.literal_column(\"'admin_area'\"))],\n else_=db.literal_column(\"'district'\")).label(\"area_type\"),\n db.case([(models.District.code.is_(None), models.AdminArea.code)],\n else_=models.District.code).label(\"area_code\"),\n db.case([(models.District.code.is_(None), models.AdminArea.name)],\n else_=models.District.name).label(\"area_name\")\n ).select_from(models.Region)\n .join(models.Region.areas)\n .outerjoin(models.AdminArea.districts)\n .filter(models.Region.code != \"GB\")\n .order_by(\"region_name\", \"area_name\")\n .all()\n )\n regions = {}\n areas = {}\n for row in regions_areas:\n regions[row.region_code] = row.region_name\n areas.setdefault(row.region_code, []).append(row)\n\n return render_template(\"regions.html\", regions=regions, areas=areas)", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, 
region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)", "def ListRegions(self):\n project = properties.VALUES.core.project.GetOrFail()\n request = self.messages.CloudfunctionsProjectsLocationsListRequest(\n name='projects/' + project\n )\n return list_pager.YieldFromList(\n service=self.client.projects_locations,\n request=request,\n field='locations',\n batch_size_attribute='pageSize',\n )", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def scope(self) -> List[Region]:\n return [self]", "def get_regions_in_partition(self, prefix=None, delimiter='/'):\n if prefix is None:\n prefix = self.s3_path\n else:\n prefix = self._strip_slashes(prefix)\n\n query_params = {\n 'Bucket': self.s3_bucket,\n 'Prefix': prefix + '/',\n 'Delimiter': delimiter\n }\n\n # We currently should be able to get all regions in a single request\n # TODO: Fail if we get a next token - there's more to this prefix than meets the eye\n region_list = []\n response = self.s3_client.list_objects_v2(**query_params)\n for c_prefix in response.get('CommonPrefixes', []):\n region = self._extract_region_from_prefix(c_prefix)\n if region:\n region_list.append(region)\n\n return region_list", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def DescribeAccessRegions(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeAccessRegions\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeAccessRegionsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_all_in_region(self, cloud_account_id: str, region_id: str) -> 
List[Dict]:\n\t\tquery_parameters = {'cloudAccountId': cloud_account_id, 'regionId': region_id}\n\t\treturn self._get(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, params=query_parameters)", "def get_regions():\n\n # Also known as the 'climbing directory'\n route_guide = urlopen('https://www.mountainproject.com/route-guide',\n context=ctx)\n # Opens HTML\n region_html = route_guide.read()\n # Parses HTML with BS package\n region_soup = BeautifulSoup(region_html, 'html.parser')\n # Finds regions area of the page\n regions = region_soup.find('div', id='route-guide')\\\n .find_all('div', class_='mb-half')\n\n for region in regions:\n # Link to region area guide\n url = region.find('a')['href']\n # English name of region\n region_name = region.find('a').get_text()\n # Writes region name and url to Areas DB. This gives the region a\n # unique id automatically\n cursor.execute('''\n INSERT INTO Areas(url, name)\n VALUES ('%s', '%s')\n ON CONFLICT DO NOTHING\n ''' % (url, region_name))\n # Commits to DB\n conn.commit()", "def region(self):\n return [node.region for node in self]", "def region(self):\n return regions.lookup(self.state)", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def regions(self):\n regions = set()\n for report in self._reports:\n region = report.model.region\n if region is None or region in regions:\n continue\n yield region", "def get_db_regions(self, context, regions):\n regions_objs = self.dns_manager.get_db_regions(context, regions)\n return regions_objs", "def scope(self) -> List[Region]:\n return self._scope", "def _get_available_region_options():\n available_regions = sorted(_get_available_regions())\n options = [ConfigurationOption(region, region) for region in available_regions]\n\n return options", "def describe_regions(\n self,\n request: dds_20151201_models.DescribeRegionsRequest,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_regions_with_options(request, runtime)", "def lookups(self, request, model_admin):\r\n list_of_regions = []\r\n queryset = Region.objects.filter(parent__isnull=True).order_by(\"name\")\r\n for region in queryset:\r\n list_of_regions.append((str(region.id), region.name))\r\n return list_of_regions", "def get_all_regions(self, region_names=None, filters=None):\r\n params = {}\r\n if region_names:\r\n self.build_list_params(params, region_names, 'RegionName')\r\n if filters:\r\n self.build_filter_params(params, filters)\r\n regions = self.get_list('DescribeRegions', params,\r\n [('item', RegionInfo)], verb='POST')\r\n for region in regions:\r\n region.connection_cls = EC2Connection\r\n return regions", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def regions_by_tag(self, *tags: str) -> Iterable[str]:\n node = self.shards_xml(\"regionsbytag\", tags=\",\".join(tags))[\"regions\"]\n text = node.text if node.text else \"\"\n return text.split(\",\")", "def get_regions(locale):\n\n def json_file(name):\n return os.path.join(json_dir, 'regions', '%s.json' % name)\n\n filepath = json_file(locale)\n\n if not os.path.exists(filepath):\n filepath = json_file('en-US')\n if not os.path.exists(filepath):\n raise Exception('Unable to load region data')\n\n with 
codecs.open(filepath, encoding='utf8') as fd:\n return json.load(fd)", "def _get_global_table_all_regions(table_name: str) -> List[dict]:\n description = _describe_table(table_name=table_name)\n replicas = description['Table'].get('Replicas', [])\n return replicas", "def GetWorldRegions():\n return GetDataFromCsvFile('world_regions.csv')", "def get_regionlist(chosenmodel):\n regionlist = list(chosenmodel.regions.keys())\n [ regionlist.remove(key) for key in regionlist\n if type(chosenmodel.regions[key]) is dict ]\n return regionlist", "def test_api_regions(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load regions from url specified in api base\n r = requests.get(r['regions']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('regions', r)", "def vpc_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_region\")", "def get_aws_reserved_networks(region=None, all_regions=False):\n result = []\n if all_regions:\n for aws_region in get_aws_regions_list():\n for vpc in boto3.client('ec2', region_name=aws_region).describe_vpcs()['Vpcs']:\n result.append(vpc)\n else:\n result = boto3.client('ec2', region_name=region).describe_vpcs()['Vpcs']\n\n vpc_used_cidr_list = []\n for vpc in result:\n vpc_used_cidr_list.append(PyVPCBlock(network=ipaddress.ip_network(vpc['CidrBlock']),\n resource_id=vpc['VpcId'],\n name=get_aws_resource_name(vpc),\n resource_type='vpc'))\n return vpc_used_cidr_list", "def test_aws_service_api_availability_zones_get(self):\n pass", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def source_region_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"source_region_ids\")", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def DescribeAccessRegionsByDestRegion(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeAccessRegionsByDestRegion\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeAccessRegionsByDestRegionResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def read_regions(namefile):\n db = shelve.open(namefile)\n key_firms = db['nif']\n regions = db['regions']\n methodvalues = db['methodvalues']\n db.close()\n return key_firms, regions, methodvalues", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "async def describe_regions_async(\n self,\n request: dds_20151201_models.DescribeRegionsRequest,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_regions_with_options_async(request, runtime)", "def operating_regions(self) -> pulumi.Output[Optional[Sequence['outputs.IpamOperatingRegion']]]:\n return pulumi.get(self, \"operating_regions\")", "def getRegions(self, clearCache=False):\n if clearCache:\n self._regionCache = None\n if self._regionCache is not None:\n return self._regionCache\n\n self.lock.acquire()\n\n regions = []\n 
self._regionsByName = {}\n\n # Iterate over all descriptors (even numbered regions)\n for index in range(0, MAX_REGIONS, 2):\n def storeDescriptor(descriptor, index=index):\n size = struct.unpack(\"<I\", descriptor[:4])[0]\n name = descriptor[4:].split('\\x00')[0]\n if name:\n region = Region(index + 1, size, name)\n regions.append(region)\n self._regionsByName[name] = region\n\n # Send the command the low-level way, since we already have the lock.\n self.recv.queue.put((MAX_DESCRIPTOR_LEN, storeDescriptor))\n self.send.queue.put(opSetRegion(index) + opReadLongs(MAX_DESCRIPTOR_LEN))\n\n self.recv.queue.join()\n self._regionCache = regions\n\n self.lock.release()\n return regions", "def get_regionlist_of_componentgroup(chosenmodel, componentgroup_name):\n return list(chosenmodel.regions[componentgroup_name].keys())", "def DescribeDestRegions(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeDestRegions\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeDestRegionsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def region(self, args):\n m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def get_region(self):\n return self.creds.get('region_name')", "def endpoint_group_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_group_region\")", "def load_all_countries(self):\n core = self.core\n regionNodes = core.load_children(self.META[\"Countries\"])\n countryNodes = []\n if regionNodes:\n for regionNode in regionNodes:\n if core.get_base_type(regionNode) == self.META[\"Region\"]:\n countryNodes += core.load_children(regionNode)\n return countryNodes\n else:\n print(\"There are no regions in the database\")", "def listPredefinedRegions(self):\n\n res = self._Client__proxy.listPredefinedRegions(\n self._Client__session)\n\n self.checkResult(res)\n return res[\"predefinedRegions\"]", "def region(self):\n return self.config.region", "def list_internet_gateways(\n profile_name: str = 'terraform',\n region_name: str = 'us-east-1'\n) -> [str]:\n session = boto3.Session(profile_name=profile_name)\n client = session.client(service_name='ec2', region_name=region_name)\n elements = client.describe_internet_gateways()\n _lines = []\n for element in elements['InternetGateways']:\n x = InternetGateway(element['InternetGatewayId'])\n x.vpc_id = element['Attachments'][0]['VpcId']\n x.tags = element['Tags']\n _lines.append(x)\n 
return _lines", "def get_zones(region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n return [z.name for z in conn.get_all_zones()]", "def regions_dict(self):\n regions_dict = dict()\n for i, r in enumerate(self.regions):\n regions_dict[getattr(r, 'ix', i)] = r\n return regions_dict", "def region(self):\n if self._region is None:\n cache_key = self.expand_name(\"region\")\n cached = unitdata.kv().get(cache_key)\n if cached:\n self._region = cached\n else:\n req = self._imdv2_request(self._az_url)\n with urlopen(req) as fd:\n az = fd.read(READ_BLOCK_SIZE).decode(\"utf8\")\n self._region = az.rstrip(string.ascii_lowercase)\n unitdata.kv().set(cache_key, self._region)\n return self._region", "def azs_lookup(session, lambda_compatible_only=False):\n if session is None:\n return []\n\n client = session.client('ec2')\n response = client.describe_availability_zones()\n # SH Removing Hack as subnet A is already in Production and causes issues trying to delete\n # We will strip out subnets A and C when creating the lambdas.\n #rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"] if z['ZoneName'] != 'us-east-1a']\n rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"]]\n\n if lambda_compatible_only:\n current_account = get_account_id_from_session(session)\n for az in rtn.copy():\n if az[1] == 'c' and current_account == hosts.PROD_ACCOUNT:\n rtn.remove(az)\n if az[1] == 'a' and current_account == hosts.DEV_ACCOUNT:\n rtn.remove(az)\n return rtn", "def whitelist_regions(self):\n return getattr(self, '_do_whitelist_regions', False)", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def get_region_dict(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self._region_dict", "def get_transcript_regions(self, transcriptId):\n\n regions = self.get_regions('exon')\n transcriptRegions = []\n for region in regions:\n if region.transcriptId == transcriptId:\n transcriptRegions.append(region)\n return transcriptRegions", "def get_region_services(self,format=None):\n clients = HWIOS.pb_server.get_clients()\n region_services = []\n for client in clients:\n region_services.extend(client.region_services)\n #for django forms\n if format == 'tuple':\n tuple_list = []\n for region_service in region_services:\n tuple_list.append((region_service['uuid'],region_service['name']))\n return tuple_list\n return region_services", "def region(self):\n return self._get(\"region\")", "def list_buckets():\n for bucket in s3.buckets.all():\n print(bucket)", "def operating_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpamOperatingRegionArgs']]]]:\n return pulumi.get(self, \"operating_regions\")", "def get(self, request):\n conn = get_sdk_connection(request)\n availability_zone_list = _sdk_object_to_list(\n conn.load_balancer.availability_zones()\n )\n\n return {'items': availability_zone_list}", "def children(self) -> List[Region]:\n return []", "def regions_json(self, filename):\n with open(filename) as f:\n return json.load(f)", "def rendered_regions(self, obj):\n return 
obj.render_json(self.context.get('request'))", "def source_region_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_region_ids\")", "def source_region_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_region_ids\")", "def load_aws_region_name() -> str:\n session = boto3.session.Session()\n region_name = (\n click.get_current_context().params.get(\"region\") or session.region_name\n )\n return region_name", "def avail_locations(session=None, call=None):\n # TODO: need to figure out a good meaning of locations in Xen\n if call == \"action\":\n raise SaltCloudException(\n \"The avail_locations function must be called with -f or --function.\"\n )\n return pool_list()", "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes", "def get_all_zones():\n cf = CloudFlare.CloudFlare(raw=True)\n page_number = 0\n total_pages = 1\n all_zones = []\n while page_number < total_pages:\n page_number += 1\n raw_results = cf.zones.get(params={'per_page':100, 'page':page_number})\n zones = raw_results['result']\n all_zones += zones\n total_pages = raw_results['result_info']['total_pages']\n return all_zones", "def get_bucketlist():\n pass", "def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]", "def ReadRegions(self, fname=\"temp\"):\n self.fname = fname\n command = \"\"\"IPython.notebook.kernel.execute('file = open(\"temp\", \"w\"); [file.write(x[\"wcsstr\"]) for x in '+ JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})) +']; file.close()');\"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def region(self) -> str:\n return pulumi.get(self, \"region\")" ]
[ "0.8312565", "0.779635", "0.77942985", "0.77942985", "0.7289586", "0.728351", "0.7272279", "0.71950173", "0.7140144", "0.70876795", "0.69920516", "0.6973136", "0.692615", "0.6878461", "0.6822473", "0.6779286", "0.6745307", "0.67410904", "0.6683353", "0.664014", "0.6598619", "0.65845656", "0.65719104", "0.65711975", "0.64892554", "0.64801663", "0.6439613", "0.6420435", "0.6401047", "0.64007866", "0.639505", "0.6384996", "0.6370646", "0.63652235", "0.6356448", "0.62927395", "0.627321", "0.61878794", "0.61749566", "0.6168414", "0.6155335", "0.608255", "0.6062303", "0.6050333", "0.6031984", "0.60035217", "0.5989563", "0.5983013", "0.591161", "0.5883922", "0.5882775", "0.5842723", "0.58391607", "0.5813007", "0.5810873", "0.5798928", "0.5797396", "0.57929695", "0.57811475", "0.57782423", "0.5777307", "0.5755902", "0.57420397", "0.5738982", "0.5728668", "0.5728234", "0.5718206", "0.5695173", "0.5688639", "0.5686243", "0.56831473", "0.5667013", "0.56665355", "0.5612399", "0.5612315", "0.5601767", "0.5580547", "0.5580073", "0.55715996", "0.5556764", "0.5535209", "0.5527436", "0.5499612", "0.5489355", "0.54797554", "0.5470251", "0.5467954", "0.5465599", "0.54629457", "0.5452229", "0.5449347", "0.5449347", "0.5448797", "0.5441852", "0.54281044", "0.54222935", "0.54175586", "0.5406034", "0.5404364", "0.54041964" ]
0.65673536
24
Gets all AWS regions that Krux can access
def get_valid_regions(self):
    conn = self._boto.ec2.connect_to_region(self.cli_region)

    regions = []
    for region in conn.get_all_regions():
        if getattr(RegionCode.Region, region.name, None) is not None:
            regions.append(RegionCode.Region[region.name])
        else:
            regions.append(region.name)

    return regions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] for region in resp.get('Regions', [])]", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_valid_regions(self):\n client = self._boto.client('ec2')\n\n regions = []\n for region in client.describe_regions().get('Regions', []):\n if getattr(RegionCode.Region, region.get('RegionName'), None) is not None:\n regions.append(RegionCode.Region[region.get('RegionName')])\n else:\n regions.append(region.get('RegionName'))\n\n return regions", "def regions(self) -> Sequence[str]:\n return pulumi.get(self, \"regions\")", "def test_aws_service_api_regions_get(self):\n pass", "def get_regions(self):\n return self._regions", "def regions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"regions\")", "def regions(self, member_state):\n rates = self._get_rates(member_state)\n return list(rates.regions.keys())", "def regions(self):\n return self._regions", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def get_regions(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_regions\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def filter_regions(self):\n return self.filter_nodes('/DistrictBuilder/Regions/Region')", "def list_regions(self, **kwargs):\n resource_path = \"/regions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_regions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n 
exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def get_regions(self,online=False):\n clients = HWIOS.pb_server.get_clients()\n regions = []\n for client in clients:\n for service in client.region_services:\n if online: \n if service['status'] == 'ON':\n for region in service['regions']:\n regions.append(region)\n else:\n for region in service['regions']:\n region['status'] = service['status']\n regions.append(region)\n return regions", "def regions(self) -> Optional[Sequence['outputs.GetTrafficPolicyDocumentRuleRegionResult']]:\n return pulumi.get(self, \"regions\")", "def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"regions\")", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def region_clients(self, **kwargs):\n return stats.region_clients(self._host, self._session, **kwargs)", "def getStudyRegions(self):\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n self.cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in self.cursor:\n exclusionRows.append(state[0])\n query = 'SELECT * FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n studyRegions = df[~df['name'].isin(exclusionRows)]['name']\n studyRegions = studyRegions.reset_index()\n studyRegions = studyRegions.drop('index', axis=1)\n self.studyRegions = studyRegions\n return studyRegions", "def get_regions(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self.R", "def get_valid_regions(self):\n pass", "def list_regions():\n regions_areas = (\n db.session.query(\n models.Region.code.label(\"region_code\"),\n models.Region.name.label(\"region_name\"),\n db.case([(models.District.code.is_(None),\n db.literal_column(\"'admin_area'\"))],\n else_=db.literal_column(\"'district'\")).label(\"area_type\"),\n db.case([(models.District.code.is_(None), models.AdminArea.code)],\n else_=models.District.code).label(\"area_code\"),\n db.case([(models.District.code.is_(None), models.AdminArea.name)],\n else_=models.District.name).label(\"area_name\")\n ).select_from(models.Region)\n .join(models.Region.areas)\n .outerjoin(models.AdminArea.districts)\n .filter(models.Region.code != \"GB\")\n .order_by(\"region_name\", \"area_name\")\n .all()\n )\n regions = {}\n areas = {}\n for row in regions_areas:\n regions[row.region_code] = row.region_name\n areas.setdefault(row.region_code, []).append(row)\n\n return render_template(\"regions.html\", regions=regions, areas=areas)", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def 
__call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)", "def ListRegions(self):\n project = properties.VALUES.core.project.GetOrFail()\n request = self.messages.CloudfunctionsProjectsLocationsListRequest(\n name='projects/' + project\n )\n return list_pager.YieldFromList(\n service=self.client.projects_locations,\n request=request,\n field='locations',\n batch_size_attribute='pageSize',\n )", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def scope(self) -> List[Region]:\n return [self]", "def get_regions_in_partition(self, prefix=None, delimiter='/'):\n if prefix is None:\n prefix = self.s3_path\n else:\n prefix = self._strip_slashes(prefix)\n\n query_params = {\n 'Bucket': self.s3_bucket,\n 'Prefix': prefix + '/',\n 'Delimiter': delimiter\n }\n\n # We currently should be able to get all regions in a single request\n # TODO: Fail if we get a next token - there's more to this prefix than meets the eye\n region_list = []\n response = self.s3_client.list_objects_v2(**query_params)\n for c_prefix in response.get('CommonPrefixes', []):\n region = self._extract_region_from_prefix(c_prefix)\n if region:\n region_list.append(region)\n\n return region_list", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def DescribeAccessRegions(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeAccessRegions\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeAccessRegionsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_all_in_region(self, cloud_account_id: str, region_id: str) -> List[Dict]:\n\t\tquery_parameters = {'cloudAccountId': cloud_account_id, 'regionId': region_id}\n\t\treturn self._get(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, params=query_parameters)", "def get_regions():\n\n # Also known as the 
'climbing directory'\n route_guide = urlopen('https://www.mountainproject.com/route-guide',\n context=ctx)\n # Opens HTML\n region_html = route_guide.read()\n # Parses HTML with BS package\n region_soup = BeautifulSoup(region_html, 'html.parser')\n # Finds regions area of the page\n regions = region_soup.find('div', id='route-guide')\\\n .find_all('div', class_='mb-half')\n\n for region in regions:\n # Link to region area guide\n url = region.find('a')['href']\n # English name of region\n region_name = region.find('a').get_text()\n # Writes region name and url to Areas DB. This gives the region a\n # unique id automatically\n cursor.execute('''\n INSERT INTO Areas(url, name)\n VALUES ('%s', '%s')\n ON CONFLICT DO NOTHING\n ''' % (url, region_name))\n # Commits to DB\n conn.commit()", "def region(self):\n return [node.region for node in self]", "def region(self):\n return regions.lookup(self.state)", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def regions(self):\n regions = set()\n for report in self._reports:\n region = report.model.region\n if region is None or region in regions:\n continue\n yield region", "def get_db_regions(self, context, regions):\n regions_objs = self.dns_manager.get_db_regions(context, regions)\n return regions_objs", "def scope(self) -> List[Region]:\n return self._scope", "def _get_available_region_options():\n available_regions = sorted(_get_available_regions())\n options = [ConfigurationOption(region, region) for region in available_regions]\n\n return options", "def describe_regions(\n self,\n request: dds_20151201_models.DescribeRegionsRequest,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_regions_with_options(request, runtime)", "def lookups(self, request, model_admin):\r\n list_of_regions = []\r\n queryset = Region.objects.filter(parent__isnull=True).order_by(\"name\")\r\n for region in queryset:\r\n list_of_regions.append((str(region.id), region.name))\r\n return list_of_regions", "def get_all_regions(self, region_names=None, filters=None):\r\n params = {}\r\n if region_names:\r\n self.build_list_params(params, region_names, 'RegionName')\r\n if filters:\r\n self.build_filter_params(params, filters)\r\n regions = self.get_list('DescribeRegions', params,\r\n [('item', RegionInfo)], verb='POST')\r\n for region in regions:\r\n region.connection_cls = EC2Connection\r\n return regions", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def regions_by_tag(self, *tags: str) -> Iterable[str]:\n node = self.shards_xml(\"regionsbytag\", tags=\",\".join(tags))[\"regions\"]\n text = node.text if node.text else \"\"\n return text.split(\",\")", "def get_regions(locale):\n\n def json_file(name):\n return os.path.join(json_dir, 'regions', '%s.json' % name)\n\n filepath = json_file(locale)\n\n if not os.path.exists(filepath):\n filepath = json_file('en-US')\n if not os.path.exists(filepath):\n raise Exception('Unable to load region data')\n\n with codecs.open(filepath, encoding='utf8') as fd:\n return json.load(fd)", "def _get_global_table_all_regions(table_name: str) -> List[dict]:\n description = _describe_table(table_name=table_name)\n replicas = description['Table'].get('Replicas', [])\n return 
replicas", "def GetWorldRegions():\n return GetDataFromCsvFile('world_regions.csv')", "def get_regionlist(chosenmodel):\n regionlist = list(chosenmodel.regions.keys())\n [ regionlist.remove(key) for key in regionlist\n if type(chosenmodel.regions[key]) is dict ]\n return regionlist", "def test_api_regions(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load regions from url specified in api base\n r = requests.get(r['regions']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('regions', r)", "def vpc_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_region\")", "def get_aws_reserved_networks(region=None, all_regions=False):\n result = []\n if all_regions:\n for aws_region in get_aws_regions_list():\n for vpc in boto3.client('ec2', region_name=aws_region).describe_vpcs()['Vpcs']:\n result.append(vpc)\n else:\n result = boto3.client('ec2', region_name=region).describe_vpcs()['Vpcs']\n\n vpc_used_cidr_list = []\n for vpc in result:\n vpc_used_cidr_list.append(PyVPCBlock(network=ipaddress.ip_network(vpc['CidrBlock']),\n resource_id=vpc['VpcId'],\n name=get_aws_resource_name(vpc),\n resource_type='vpc'))\n return vpc_used_cidr_list", "def test_aws_service_api_availability_zones_get(self):\n pass", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def source_region_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"source_region_ids\")", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def DescribeAccessRegionsByDestRegion(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeAccessRegionsByDestRegion\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeAccessRegionsByDestRegionResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def read_regions(namefile):\n db = shelve.open(namefile)\n key_firms = db['nif']\n regions = db['regions']\n methodvalues = db['methodvalues']\n db.close()\n return key_firms, regions, methodvalues", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "async def describe_regions_async(\n self,\n request: dds_20151201_models.DescribeRegionsRequest,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_regions_with_options_async(request, runtime)", "def operating_regions(self) -> pulumi.Output[Optional[Sequence['outputs.IpamOperatingRegion']]]:\n return pulumi.get(self, \"operating_regions\")", "def getRegions(self, clearCache=False):\n if clearCache:\n self._regionCache = None\n if self._regionCache is not None:\n return self._regionCache\n\n self.lock.acquire()\n\n regions = []\n self._regionsByName = {}\n\n # Iterate over all descriptors (even numbered regions)\n for index in range(0, MAX_REGIONS, 2):\n def storeDescriptor(descriptor, index=index):\n size = struct.unpack(\"<I\", descriptor[:4])[0]\n name = 
descriptor[4:].split('\\x00')[0]\n if name:\n region = Region(index + 1, size, name)\n regions.append(region)\n self._regionsByName[name] = region\n\n # Send the command the low-level way, since we already have the lock.\n self.recv.queue.put((MAX_DESCRIPTOR_LEN, storeDescriptor))\n self.send.queue.put(opSetRegion(index) + opReadLongs(MAX_DESCRIPTOR_LEN))\n\n self.recv.queue.join()\n self._regionCache = regions\n\n self.lock.release()\n return regions", "def get_regionlist_of_componentgroup(chosenmodel, componentgroup_name):\n return list(chosenmodel.regions[componentgroup_name].keys())", "def DescribeDestRegions(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeDestRegions\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeDestRegionsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def region(self, args):\n m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def get_region(self):\n return self.creds.get('region_name')", "def endpoint_group_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_group_region\")", "def load_all_countries(self):\n core = self.core\n regionNodes = core.load_children(self.META[\"Countries\"])\n countryNodes = []\n if regionNodes:\n for regionNode in regionNodes:\n if core.get_base_type(regionNode) == self.META[\"Region\"]:\n countryNodes += core.load_children(regionNode)\n return countryNodes\n else:\n print(\"There are no regions in the database\")", "def listPredefinedRegions(self):\n\n res = self._Client__proxy.listPredefinedRegions(\n self._Client__session)\n\n self.checkResult(res)\n return res[\"predefinedRegions\"]", "def region(self):\n return self.config.region", "def list_internet_gateways(\n profile_name: str = 'terraform',\n region_name: str = 'us-east-1'\n) -> [str]:\n session = boto3.Session(profile_name=profile_name)\n client = session.client(service_name='ec2', region_name=region_name)\n elements = client.describe_internet_gateways()\n _lines = []\n for element in elements['InternetGateways']:\n x = InternetGateway(element['InternetGatewayId'])\n x.vpc_id = element['Attachments'][0]['VpcId']\n x.tags = element['Tags']\n _lines.append(x)\n return _lines", "def get_zones(region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n return [z.name for z in conn.get_all_zones()]", "def regions_dict(self):\n 
regions_dict = dict()\n for i, r in enumerate(self.regions):\n regions_dict[getattr(r, 'ix', i)] = r\n return regions_dict", "def region(self):\n if self._region is None:\n cache_key = self.expand_name(\"region\")\n cached = unitdata.kv().get(cache_key)\n if cached:\n self._region = cached\n else:\n req = self._imdv2_request(self._az_url)\n with urlopen(req) as fd:\n az = fd.read(READ_BLOCK_SIZE).decode(\"utf8\")\n self._region = az.rstrip(string.ascii_lowercase)\n unitdata.kv().set(cache_key, self._region)\n return self._region", "def azs_lookup(session, lambda_compatible_only=False):\n if session is None:\n return []\n\n client = session.client('ec2')\n response = client.describe_availability_zones()\n # SH Removing Hack as subnet A is already in Production and causes issues trying to delete\n # We will strip out subnets A and C when creating the lambdas.\n #rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"] if z['ZoneName'] != 'us-east-1a']\n rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"]]\n\n if lambda_compatible_only:\n current_account = get_account_id_from_session(session)\n for az in rtn.copy():\n if az[1] == 'c' and current_account == hosts.PROD_ACCOUNT:\n rtn.remove(az)\n if az[1] == 'a' and current_account == hosts.DEV_ACCOUNT:\n rtn.remove(az)\n return rtn", "def whitelist_regions(self):\n return getattr(self, '_do_whitelist_regions', False)", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def get_region_dict(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self._region_dict", "def get_transcript_regions(self, transcriptId):\n\n regions = self.get_regions('exon')\n transcriptRegions = []\n for region in regions:\n if region.transcriptId == transcriptId:\n transcriptRegions.append(region)\n return transcriptRegions", "def get_region_services(self,format=None):\n clients = HWIOS.pb_server.get_clients()\n region_services = []\n for client in clients:\n region_services.extend(client.region_services)\n #for django forms\n if format == 'tuple':\n tuple_list = []\n for region_service in region_services:\n tuple_list.append((region_service['uuid'],region_service['name']))\n return tuple_list\n return region_services", "def region(self):\n return self._get(\"region\")", "def list_buckets():\n for bucket in s3.buckets.all():\n print(bucket)", "def operating_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpamOperatingRegionArgs']]]]:\n return pulumi.get(self, \"operating_regions\")", "def get(self, request):\n conn = get_sdk_connection(request)\n availability_zone_list = _sdk_object_to_list(\n conn.load_balancer.availability_zones()\n )\n\n return {'items': availability_zone_list}", "def children(self) -> List[Region]:\n return []", "def regions_json(self, filename):\n with open(filename) as f:\n return json.load(f)", "def rendered_regions(self, obj):\n return obj.render_json(self.context.get('request'))", "def source_region_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_region_ids\")", "def source_region_ids(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_region_ids\")", "def load_aws_region_name() -> str:\n session = boto3.session.Session()\n region_name = (\n click.get_current_context().params.get(\"region\") or session.region_name\n )\n return region_name", "def avail_locations(session=None, call=None):\n # TODO: need to figure out a good meaning of locations in Xen\n if call == \"action\":\n raise SaltCloudException(\n \"The avail_locations function must be called with -f or --function.\"\n )\n return pool_list()", "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes", "def get_all_zones():\n cf = CloudFlare.CloudFlare(raw=True)\n page_number = 0\n total_pages = 1\n all_zones = []\n while page_number < total_pages:\n page_number += 1\n raw_results = cf.zones.get(params={'per_page':100, 'page':page_number})\n zones = raw_results['result']\n all_zones += zones\n total_pages = raw_results['result_info']['total_pages']\n return all_zones", "def get_bucketlist():\n pass", "def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]", "def ReadRegions(self, fname=\"temp\"):\n self.fname = fname\n command = \"\"\"IPython.notebook.kernel.execute('file = open(\"temp\", \"w\"); [file.write(x[\"wcsstr\"]) for x in '+ JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})) +']; file.close()');\"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def region(self) -> str:\n return pulumi.get(self, \"region\")" ]
[ "0.8312565", "0.779635", "0.77942985", "0.77942985", "0.728351", "0.7272279", "0.71950173", "0.7140144", "0.70876795", "0.69920516", "0.6973136", "0.692615", "0.6878461", "0.6822473", "0.6779286", "0.6745307", "0.67410904", "0.6683353", "0.664014", "0.6598619", "0.65845656", "0.65719104", "0.65711975", "0.65673536", "0.64892554", "0.64801663", "0.6439613", "0.6420435", "0.6401047", "0.64007866", "0.639505", "0.6384996", "0.6370646", "0.63652235", "0.6356448", "0.62927395", "0.627321", "0.61878794", "0.61749566", "0.6168414", "0.6155335", "0.608255", "0.6062303", "0.6050333", "0.6031984", "0.60035217", "0.5989563", "0.5983013", "0.591161", "0.5883922", "0.5882775", "0.5842723", "0.58391607", "0.5813007", "0.5810873", "0.5798928", "0.5797396", "0.57929695", "0.57811475", "0.57782423", "0.5777307", "0.5755902", "0.57420397", "0.5738982", "0.5728668", "0.5728234", "0.5718206", "0.5695173", "0.5688639", "0.5686243", "0.56831473", "0.5667013", "0.56665355", "0.5612399", "0.5612315", "0.5601767", "0.5580547", "0.5580073", "0.55715996", "0.5556764", "0.5535209", "0.5527436", "0.5499612", "0.5489355", "0.54797554", "0.5470251", "0.5467954", "0.5465599", "0.54629457", "0.5452229", "0.5449347", "0.5449347", "0.5448797", "0.5441852", "0.54281044", "0.54222935", "0.54175586", "0.5406034", "0.5404364", "0.54041964" ]
0.7289586
4
Gets all AWS regions that Krux can access
def get_valid_regions(self):
    client = self._boto.client('ec2')

    regions = []
    for region in client.describe_regions().get('Regions', []):
        if getattr(RegionCode.Region, region.get('RegionName'), None) is not None:
            regions.append(RegionCode.Region[region.get('RegionName')])
        else:
            regions.append(region.get('RegionName'))

    return regions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] for region in resp.get('Regions', [])]", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_valid_regions(self):\n conn = self._boto.ec2.connect_to_region(self.cli_region)\n\n regions = []\n for region in conn.get_all_regions():\n if getattr(RegionCode.Region, region.name, None) is not None:\n regions.append(RegionCode.Region[region.name])\n else:\n regions.append(region.name)\n\n return regions", "def regions(self) -> Sequence[str]:\n return pulumi.get(self, \"regions\")", "def test_aws_service_api_regions_get(self):\n pass", "def get_regions(self):\n return self._regions", "def regions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"regions\")", "def regions(self, member_state):\n rates = self._get_rates(member_state)\n return list(rates.regions.keys())", "def regions(self):\n return self._regions", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def get_regions(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_regions\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def filter_regions(self):\n return self.filter_nodes('/DistrictBuilder/Regions/Region')", "def list_regions(self, **kwargs):\n resource_path = \"/regions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_regions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n 
cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def get_regions(self,online=False):\n clients = HWIOS.pb_server.get_clients()\n regions = []\n for client in clients:\n for service in client.region_services:\n if online: \n if service['status'] == 'ON':\n for region in service['regions']:\n regions.append(region)\n else:\n for region in service['regions']:\n region['status'] = service['status']\n regions.append(region)\n return regions", "def regions(self) -> Optional[Sequence['outputs.GetTrafficPolicyDocumentRuleRegionResult']]:\n return pulumi.get(self, \"regions\")", "def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"regions\")", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def region_clients(self, **kwargs):\n return stats.region_clients(self._host, self._session, **kwargs)", "def getStudyRegions(self):\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n self.cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in self.cursor:\n exclusionRows.append(state[0])\n query = 'SELECT * FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n studyRegions = df[~df['name'].isin(exclusionRows)]['name']\n studyRegions = studyRegions.reset_index()\n studyRegions = studyRegions.drop('index', axis=1)\n self.studyRegions = studyRegions\n return studyRegions", "def get_regions(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self.R", "def get_valid_regions(self):\n pass", "def list_regions():\n regions_areas = (\n db.session.query(\n models.Region.code.label(\"region_code\"),\n models.Region.name.label(\"region_name\"),\n db.case([(models.District.code.is_(None),\n db.literal_column(\"'admin_area'\"))],\n else_=db.literal_column(\"'district'\")).label(\"area_type\"),\n db.case([(models.District.code.is_(None), models.AdminArea.code)],\n else_=models.District.code).label(\"area_code\"),\n db.case([(models.District.code.is_(None), models.AdminArea.name)],\n else_=models.District.name).label(\"area_name\")\n ).select_from(models.Region)\n .join(models.Region.areas)\n .outerjoin(models.AdminArea.districts)\n .filter(models.Region.code != \"GB\")\n .order_by(\"region_name\", \"area_name\")\n .all()\n )\n regions = {}\n areas = {}\n for row in regions_areas:\n regions[row.region_code] = row.region_name\n areas.setdefault(row.region_code, []).append(row)\n\n return render_template(\"regions.html\", regions=regions, areas=areas)", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = 
kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)", "def ListRegions(self):\n project = properties.VALUES.core.project.GetOrFail()\n request = self.messages.CloudfunctionsProjectsLocationsListRequest(\n name='projects/' + project\n )\n return list_pager.YieldFromList(\n service=self.client.projects_locations,\n request=request,\n field='locations',\n batch_size_attribute='pageSize',\n )", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def scope(self) -> List[Region]:\n return [self]", "def get_regions_in_partition(self, prefix=None, delimiter='/'):\n if prefix is None:\n prefix = self.s3_path\n else:\n prefix = self._strip_slashes(prefix)\n\n query_params = {\n 'Bucket': self.s3_bucket,\n 'Prefix': prefix + '/',\n 'Delimiter': delimiter\n }\n\n # We currently should be able to get all regions in a single request\n # TODO: Fail if we get a next token - there's more to this prefix than meets the eye\n region_list = []\n response = self.s3_client.list_objects_v2(**query_params)\n for c_prefix in response.get('CommonPrefixes', []):\n region = self._extract_region_from_prefix(c_prefix)\n if region:\n region_list.append(region)\n\n return region_list", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def DescribeAccessRegions(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeAccessRegions\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeAccessRegionsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_all_in_region(self, cloud_account_id: str, region_id: str) -> List[Dict]:\n\t\tquery_parameters = {'cloudAccountId': cloud_account_id, 'regionId': region_id}\n\t\treturn self._get(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, params=query_parameters)", "def get_regions():\n\n # Also known as the 'climbing directory'\n route_guide = 
urlopen('https://www.mountainproject.com/route-guide',\n context=ctx)\n # Opens HTML\n region_html = route_guide.read()\n # Parses HTML with BS package\n region_soup = BeautifulSoup(region_html, 'html.parser')\n # Finds regions area of the page\n regions = region_soup.find('div', id='route-guide')\\\n .find_all('div', class_='mb-half')\n\n for region in regions:\n # Link to region area guide\n url = region.find('a')['href']\n # English name of region\n region_name = region.find('a').get_text()\n # Writes region name and url to Areas DB. This gives the region a\n # unique id automatically\n cursor.execute('''\n INSERT INTO Areas(url, name)\n VALUES ('%s', '%s')\n ON CONFLICT DO NOTHING\n ''' % (url, region_name))\n # Commits to DB\n conn.commit()", "def region(self):\n return [node.region for node in self]", "def region(self):\n return regions.lookup(self.state)", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def regions(self):\n regions = set()\n for report in self._reports:\n region = report.model.region\n if region is None or region in regions:\n continue\n yield region", "def get_db_regions(self, context, regions):\n regions_objs = self.dns_manager.get_db_regions(context, regions)\n return regions_objs", "def scope(self) -> List[Region]:\n return self._scope", "def _get_available_region_options():\n available_regions = sorted(_get_available_regions())\n options = [ConfigurationOption(region, region) for region in available_regions]\n\n return options", "def describe_regions(\n self,\n request: dds_20151201_models.DescribeRegionsRequest,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_regions_with_options(request, runtime)", "def lookups(self, request, model_admin):\r\n list_of_regions = []\r\n queryset = Region.objects.filter(parent__isnull=True).order_by(\"name\")\r\n for region in queryset:\r\n list_of_regions.append((str(region.id), region.name))\r\n return list_of_regions", "def get_all_regions(self, region_names=None, filters=None):\r\n params = {}\r\n if region_names:\r\n self.build_list_params(params, region_names, 'RegionName')\r\n if filters:\r\n self.build_filter_params(params, filters)\r\n regions = self.get_list('DescribeRegions', params,\r\n [('item', RegionInfo)], verb='POST')\r\n for region in regions:\r\n region.connection_cls = EC2Connection\r\n return regions", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def regions_by_tag(self, *tags: str) -> Iterable[str]:\n node = self.shards_xml(\"regionsbytag\", tags=\",\".join(tags))[\"regions\"]\n text = node.text if node.text else \"\"\n return text.split(\",\")", "def get_regions(locale):\n\n def json_file(name):\n return os.path.join(json_dir, 'regions', '%s.json' % name)\n\n filepath = json_file(locale)\n\n if not os.path.exists(filepath):\n filepath = json_file('en-US')\n if not os.path.exists(filepath):\n raise Exception('Unable to load region data')\n\n with codecs.open(filepath, encoding='utf8') as fd:\n return json.load(fd)", "def _get_global_table_all_regions(table_name: str) -> List[dict]:\n description = _describe_table(table_name=table_name)\n replicas = description['Table'].get('Replicas', [])\n return replicas", "def GetWorldRegions():\n 
return GetDataFromCsvFile('world_regions.csv')", "def get_regionlist(chosenmodel):\n regionlist = list(chosenmodel.regions.keys())\n [ regionlist.remove(key) for key in regionlist\n if type(chosenmodel.regions[key]) is dict ]\n return regionlist", "def test_api_regions(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load regions from url specified in api base\n r = requests.get(r['regions']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('regions', r)", "def vpc_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_region\")", "def get_aws_reserved_networks(region=None, all_regions=False):\n result = []\n if all_regions:\n for aws_region in get_aws_regions_list():\n for vpc in boto3.client('ec2', region_name=aws_region).describe_vpcs()['Vpcs']:\n result.append(vpc)\n else:\n result = boto3.client('ec2', region_name=region).describe_vpcs()['Vpcs']\n\n vpc_used_cidr_list = []\n for vpc in result:\n vpc_used_cidr_list.append(PyVPCBlock(network=ipaddress.ip_network(vpc['CidrBlock']),\n resource_id=vpc['VpcId'],\n name=get_aws_resource_name(vpc),\n resource_type='vpc'))\n return vpc_used_cidr_list", "def test_aws_service_api_availability_zones_get(self):\n pass", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def source_region_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"source_region_ids\")", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def DescribeAccessRegionsByDestRegion(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeAccessRegionsByDestRegion\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeAccessRegionsByDestRegionResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def read_regions(namefile):\n db = shelve.open(namefile)\n key_firms = db['nif']\n regions = db['regions']\n methodvalues = db['methodvalues']\n db.close()\n return key_firms, regions, methodvalues", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "async def describe_regions_async(\n self,\n request: dds_20151201_models.DescribeRegionsRequest,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_regions_with_options_async(request, runtime)", "def operating_regions(self) -> pulumi.Output[Optional[Sequence['outputs.IpamOperatingRegion']]]:\n return pulumi.get(self, \"operating_regions\")", "def getRegions(self, clearCache=False):\n if clearCache:\n self._regionCache = None\n if self._regionCache is not None:\n return self._regionCache\n\n self.lock.acquire()\n\n regions = []\n self._regionsByName = {}\n\n # Iterate over all descriptors (even numbered regions)\n for index in range(0, MAX_REGIONS, 2):\n def storeDescriptor(descriptor, index=index):\n size = struct.unpack(\"<I\", descriptor[:4])[0]\n name = descriptor[4:].split('\\x00')[0]\n if name:\n region = Region(index 
+ 1, size, name)\n regions.append(region)\n self._regionsByName[name] = region\n\n # Send the command the low-level way, since we already have the lock.\n self.recv.queue.put((MAX_DESCRIPTOR_LEN, storeDescriptor))\n self.send.queue.put(opSetRegion(index) + opReadLongs(MAX_DESCRIPTOR_LEN))\n\n self.recv.queue.join()\n self._regionCache = regions\n\n self.lock.release()\n return regions", "def get_regionlist_of_componentgroup(chosenmodel, componentgroup_name):\n return list(chosenmodel.regions[componentgroup_name].keys())", "def DescribeDestRegions(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeDestRegions\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeDestRegionsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def region(self, args):\n m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def get_region(self):\n return self.creds.get('region_name')", "def endpoint_group_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_group_region\")", "def load_all_countries(self):\n core = self.core\n regionNodes = core.load_children(self.META[\"Countries\"])\n countryNodes = []\n if regionNodes:\n for regionNode in regionNodes:\n if core.get_base_type(regionNode) == self.META[\"Region\"]:\n countryNodes += core.load_children(regionNode)\n return countryNodes\n else:\n print(\"There are no regions in the database\")", "def listPredefinedRegions(self):\n\n res = self._Client__proxy.listPredefinedRegions(\n self._Client__session)\n\n self.checkResult(res)\n return res[\"predefinedRegions\"]", "def region(self):\n return self.config.region", "def list_internet_gateways(\n profile_name: str = 'terraform',\n region_name: str = 'us-east-1'\n) -> [str]:\n session = boto3.Session(profile_name=profile_name)\n client = session.client(service_name='ec2', region_name=region_name)\n elements = client.describe_internet_gateways()\n _lines = []\n for element in elements['InternetGateways']:\n x = InternetGateway(element['InternetGatewayId'])\n x.vpc_id = element['Attachments'][0]['VpcId']\n x.tags = element['Tags']\n _lines.append(x)\n return _lines", "def get_zones(region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n return [z.name for z in conn.get_all_zones()]", "def regions_dict(self):\n regions_dict = dict()\n for i, r in enumerate(self.regions):\n 
regions_dict[getattr(r, 'ix', i)] = r\n return regions_dict", "def region(self):\n if self._region is None:\n cache_key = self.expand_name(\"region\")\n cached = unitdata.kv().get(cache_key)\n if cached:\n self._region = cached\n else:\n req = self._imdv2_request(self._az_url)\n with urlopen(req) as fd:\n az = fd.read(READ_BLOCK_SIZE).decode(\"utf8\")\n self._region = az.rstrip(string.ascii_lowercase)\n unitdata.kv().set(cache_key, self._region)\n return self._region", "def azs_lookup(session, lambda_compatible_only=False):\n if session is None:\n return []\n\n client = session.client('ec2')\n response = client.describe_availability_zones()\n # SH Removing Hack as subnet A is already in Production and causes issues trying to delete\n # We will strip out subnets A and C when creating the lambdas.\n #rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"] if z['ZoneName'] != 'us-east-1a']\n rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"]]\n\n if lambda_compatible_only:\n current_account = get_account_id_from_session(session)\n for az in rtn.copy():\n if az[1] == 'c' and current_account == hosts.PROD_ACCOUNT:\n rtn.remove(az)\n if az[1] == 'a' and current_account == hosts.DEV_ACCOUNT:\n rtn.remove(az)\n return rtn", "def whitelist_regions(self):\n return getattr(self, '_do_whitelist_regions', False)", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def get_region_dict(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self._region_dict", "def get_transcript_regions(self, transcriptId):\n\n regions = self.get_regions('exon')\n transcriptRegions = []\n for region in regions:\n if region.transcriptId == transcriptId:\n transcriptRegions.append(region)\n return transcriptRegions", "def get_region_services(self,format=None):\n clients = HWIOS.pb_server.get_clients()\n region_services = []\n for client in clients:\n region_services.extend(client.region_services)\n #for django forms\n if format == 'tuple':\n tuple_list = []\n for region_service in region_services:\n tuple_list.append((region_service['uuid'],region_service['name']))\n return tuple_list\n return region_services", "def region(self):\n return self._get(\"region\")", "def list_buckets():\n for bucket in s3.buckets.all():\n print(bucket)", "def operating_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpamOperatingRegionArgs']]]]:\n return pulumi.get(self, \"operating_regions\")", "def get(self, request):\n conn = get_sdk_connection(request)\n availability_zone_list = _sdk_object_to_list(\n conn.load_balancer.availability_zones()\n )\n\n return {'items': availability_zone_list}", "def children(self) -> List[Region]:\n return []", "def regions_json(self, filename):\n with open(filename) as f:\n return json.load(f)", "def rendered_regions(self, obj):\n return obj.render_json(self.context.get('request'))", "def source_region_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_region_ids\")", "def source_region_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_region_ids\")", "def 
load_aws_region_name() -> str:\n session = boto3.session.Session()\n region_name = (\n click.get_current_context().params.get(\"region\") or session.region_name\n )\n return region_name", "def avail_locations(session=None, call=None):\n # TODO: need to figure out a good meaning of locations in Xen\n if call == \"action\":\n raise SaltCloudException(\n \"The avail_locations function must be called with -f or --function.\"\n )\n return pool_list()", "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes", "def get_all_zones():\n cf = CloudFlare.CloudFlare(raw=True)\n page_number = 0\n total_pages = 1\n all_zones = []\n while page_number < total_pages:\n page_number += 1\n raw_results = cf.zones.get(params={'per_page':100, 'page':page_number})\n zones = raw_results['result']\n all_zones += zones\n total_pages = raw_results['result_info']['total_pages']\n return all_zones", "def get_bucketlist():\n pass", "def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]", "def ReadRegions(self, fname=\"temp\"):\n self.fname = fname\n command = \"\"\"IPython.notebook.kernel.execute('file = open(\"temp\", \"w\"); [file.write(x[\"wcsstr\"]) for x in '+ JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})) +']; file.close()');\"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def region(self) -> str:\n return pulumi.get(self, \"region\")" ]
[ "0.8312565", "0.779635", "0.77942985", "0.77942985", "0.7289586", "0.7272279", "0.71950173", "0.7140144", "0.70876795", "0.69920516", "0.6973136", "0.692615", "0.6878461", "0.6822473", "0.6779286", "0.6745307", "0.67410904", "0.6683353", "0.664014", "0.6598619", "0.65845656", "0.65719104", "0.65711975", "0.65673536", "0.64892554", "0.64801663", "0.6439613", "0.6420435", "0.6401047", "0.64007866", "0.639505", "0.6384996", "0.6370646", "0.63652235", "0.6356448", "0.62927395", "0.627321", "0.61878794", "0.61749566", "0.6168414", "0.6155335", "0.608255", "0.6062303", "0.6050333", "0.6031984", "0.60035217", "0.5989563", "0.5983013", "0.591161", "0.5883922", "0.5882775", "0.5842723", "0.58391607", "0.5813007", "0.5810873", "0.5798928", "0.5797396", "0.57929695", "0.57811475", "0.57782423", "0.5777307", "0.5755902", "0.57420397", "0.5738982", "0.5728668", "0.5728234", "0.5718206", "0.5695173", "0.5688639", "0.5686243", "0.56831473", "0.5667013", "0.56665355", "0.5612399", "0.5612315", "0.5601767", "0.5580547", "0.5580073", "0.55715996", "0.5556764", "0.5535209", "0.5527436", "0.5499612", "0.5489355", "0.54797554", "0.5470251", "0.5467954", "0.5465599", "0.54629457", "0.5452229", "0.5449347", "0.5449347", "0.5448797", "0.5441852", "0.54281044", "0.54222935", "0.54175586", "0.5406034", "0.5404364", "0.54041964" ]
0.728351
5
Initializes a read operation from Debezium.
def __init__(
    self,
    connector_class,
    username,
    password,
    host,
    port,
    max_number_of_records=None,
    connection_properties=None,
    expansion_service=None):
  self.params = ReadFromDebeziumSchema(
      connector_class=connector_class.value,
      username=username,
      password=password,
      host=host,
      port=port,
      max_number_of_records=max_number_of_records,
      connection_properties=connection_properties)
  self.expansion_service = expansion_service or default_io_expansion_service()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.reader = reader.Reader()", "def read(self):\n pass", "def read(self):", "def read(self):\n raise NotImplementedError", "def read():\n # TODO", "def read(self):\n pass", "def read(self, **kwargs):\n pass", "def _read_data(self):", "def read(self, *args, **kwargs):\n pass", "def Read(self, *args, **kwargs):\n pass", "def __init__ (self, istream) :\r\n ReaderA.__init__(self) # call parent\r\n self.is_ = istream\r\n self.cached_ = CircularBuffer(132, True)", "def __init__(self, run):\n dict.__init__(self)\n self.read(run)", "def read(self):\n self.attributes = self.call('READ', expect=error.OK)", "def __init__(self, filename, mode, version='2.0'):\n if mode.startswith('r'):\n try:\n with open(filename, 'r') as f:\n firstline = next(f)\n if not firstline.startswith('#INPORB'):\n raise InvalidRequest\n except:\n raise InvalidRequest\n\n self.f = open(filename, mode)\n\n if mode.startswith('r'):\n line = self.seek_line('#INPORB')\n self.version = line.split()[1]\n self.seek_line('#INFO')\n uhf, self.n_sym, self.wfn_type = (int(val) for val in self._next_noncomment().split())\n if uhf == 1:\n self.unrestricted = True\n else:\n self.unrestricted = False\n self.n_bas = np.array(self._next_noncomment().split(), dtype=np.int)\n self.n_orb = np.array(self._next_noncomment().split(), dtype=np.int)\n elif mode.startswith('w'):\n self.version = version\n else:\n raise Exception('invalid mode string')\n\n if self.version == '2.0':\n self.read_block = self._read_block_v20\n self.occ_fmt = ' {:7.4f}'\n self.one_fmt = ' {:11.4e}'\n self.orb_fmt = ' {:21.14e}'\n self.occ_blk_size = 10\n self.one_blk_size = 10\n self.orb_blk_size = 5\n elif self.version == '1.1':\n self.read_block = self._read_block_v11\n self.occ_fmt = '{:18.11e}'\n self.one_fmt = '{:18.11e}'\n self.orb_fmt = '{:18.11e}'\n self.occ_blk_size = 4\n self.one_blk_size = 4\n self.orb_blk_size = 4\n else:\n raise Exception('invalid version number')", "def init_edra(self) -> None:\n ...", "def __init__ (self, input) :\r\n ReaderA.__init__(self) # call parent\r\n # print '************************* input = ', input, type(input)\r\n self.buffer_ = input # this is any thing that can be indexed\r\n self.current_ = 0", "def __init__(self):\n self.read_input()\n self.update_binaries()", "def __init__(self, starting_point=-1):\n self.i_read = starting_point\n self.data = [['fake_chip_id', 'fake_version'],\n [96, 110, 203, 104, 50, 0, 29, 145, 59, 215, 208, 11,\n 232, 38, 42, 255, 249, 255, 172, 38, 10, 216, 189, 16],\n [75],\n [129, 1, 0, 16, 44, 3, 30],\n [76, 60, 128, 129, 49, 128, 94, 120]]", "def read(cls, proto):\n pass", "def readOneData(self):\n\t\tpass", "def read_data(self):\n raise NotImplementedError", "def _read_into_buffer(self):\n raise NotImplementedError()", "def __init__(self, ase):\n self.bin = open(ase, 'rb')", "def read(self) -> bytes | None:", "def read(self) -> bytes | None:", "def read(self, size=-1):\n ...", "def init(self) -> None:", "def ReadObject(self, *args, **kwargs):\n pass", "def getReader(self):\n return Tes3Reader(self.inName,cStringIO.StringIO(self.data))", "def __init__(self, data=b''):\n self.data = data\n self.offset = 0", "def __init__(self, name):\n self.fisica = fisica(name)\n self.rx = RX(self.fisica)\n self.tx = TX(self.fisica)\n self.connected = False", "def _read(self, in_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: 
disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n self.setname = in_file.read(6).decode().strip()\n self.value = float(in_file.read(12))\n self.numnod = int(in_file.read(12))\n self.text = in_file.read(20).decode().strip()\n self.ictype = int(in_file.read(2))\n self.numstep = int(in_file.read(5))\n self.analys = in_file.read(10).decode().strip()\n self.format = int(in_file.read(2))\n in_file.read(1) # eol\n\n in_file.read(1) # pad byte\n in_file.read(2) # key = -4\n in_file.read(2) # pad bytes\n self.name = in_file.read(8).decode().strip()\n self.ncomps = int(in_file.read(5))\n self.irtype = int(in_file.read(5))\n if self.irtype != 1:\n raise NotImplementedError()\n in_file.read(1) # eol\n\n for i in range(self.ncomps):\n entity = FRDEntity()\n self.entities.append(entity)\n\n in_file.read(1) # pad byte\n entity.key = int(in_file.read(2))\n in_file.read(2) # pad bytes\n entity.name = in_file.read(8).decode().strip()\n entity.menu = int(in_file.read(5))\n entity.ictype = int(in_file.read(5))\n entity.icind1 = int(in_file.read(5))\n if entity.ictype == 4:\n entity.icind2 = int(in_file.read(5))\n elif entity.ictype == 2 and i == 3:\n entity.icind2 = int(in_file.read(5))\n entity.iexist = int(in_file.read(5))\n entity.icname = in_file.read(3).decode().strip()\n self.ncomps -= 1\n else:\n entity.iexist = int(in_file.read(5))\n in_file.read(1) # eol\n\n for i in range(self.numnod):\n result = FRDNodeResult()\n self.results.append(result)\n if self.format < 2:\n num_lines = int(self.ncomps/(6 + 1)) + 1\n result.data = []\n for j in range(num_lines):\n in_file.read(3) # pad byte and key = -1 || -2\n if result.node is None:\n result.node = int(in_file.read(5*(self.format+1)))\n else:\n in_file.read(5*(self.format+1))\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for _ in range(0, k_end):\n result.data.append(float(in_file.read(12)))\n in_file.read(1) # eol\n else:\n result.node = struct.unpack('i', in_file.read(4))[0]\n result.data = struct.unpack(\n 'f'*self.ncomps, in_file.read(self.ncomps*4))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def read(self) -> int:\n ...", "def __init__(self, *args, **kwargs):\n super(Decoder, self).__init__(*args, **kwargs)\n self.stream.seek(0)\n self._code = 0", "def __init__(self, fileref):\n self.__ref = fileref\n self.__lib = _zlib.decompressobj(memLevel=9)\n self.__buf = b''", "def test_fooreader():\n from .context import readersender\n\n fr = readersender.readers.FooReader()\n\n fr.connect()\n fr.read()\n fr.disconnect()", "def __init__(self):\n self.reader = vtk.vtkImageData()\n\n self.dims = self.reader.GetDimensions()\n self.bounds = self.reader.GetBounds()\n self.spacing = self.reader.GetSpacing()\n self.origin = self.reader.GetOrigin()\n self.value_range = self.reader.GetScalarRange()\n\n # self.plane_widget_x = vtk.vtkImagePlaneWidget()\n # self.plane_widget_y = vtk.vtkImagePlaneWidget()\n # self.plane_widget_z = vtk.vtkImagePlaneWidget()\n\n self.flag_read = False", "def lazy_read_file(self):\n store = zarr.DirectoryStore(self.fpath)\n z_array = zarr.open(store=store, mode='r')\n self.da_input = da.from_array(z_array)\n self.data = self.da_input\n self.data_dim = self.data.shape\n self.chunk_size = z_array.chunks", "def __init__(self, data):\n self.data = data\n self.writing = False\n self.readerCount = 0\n self.okToRead = Condition()\n self.okToWrite = Condition()", "def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()", "def __init__(self, filename='', 
use_cython=True, raw=None):\n \n if raw and not filename:\n # Load raw data exterinally\n self.raw = raw\n return\n \n # Defaults to cython reader if user selects it\n if use_cython and cython_loaded:\n self.raw = _reader.Read(filename)\n\n # Python reader for debug purposes\n else:\n self.raw = PythonReader.Read(filename)", "def __init__(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n NexusReaderBase.__init__(self, -1)\n self.taxa = None\n self._data_matrices = None", "def _OpenRead(self):\n has_storage_metadata = self._ReadStorageMetadata()\n if not has_storage_metadata:\n # TODO: remove serializer.txt stream support in favor\n # of storage metadata.\n if self._read_only:\n logging.warning('Storage file does not contain a metadata stream.')\n\n stored_serialization_format = self._ReadSerializerStream()\n if stored_serialization_format:\n self.serialization_format = stored_serialization_format\n\n if self.serialization_format != definitions.SERIALIZER_FORMAT_JSON:\n raise IOError('Unsupported serialization format: {0:s}'.format(\n self.serialization_format))\n\n self._serializer = json_serializer.JSONAttributeContainerSerializer\n\n for container_type, stream_name_prefix in (\n self._STREAM_NAME_PREFIXES.items()):\n stream_name_prefix = '{0:s}_data.'.format(stream_name_prefix)\n self._last_stream_numbers[container_type] = self._GetLastStreamNumber(\n stream_name_prefix)\n\n self._analysis_report_stream_number = self._GetLastStreamNumber(\n 'analysis_report_data.')\n self._last_preprocess = self._GetLastStreamNumber('preprocess.')\n\n last_session_start = self._GetLastStreamNumber('session_start.')\n last_session_completion = self._GetLastStreamNumber('session_completion.')\n\n # TODO: handle open sessions.\n if last_session_start != last_session_completion:\n logging.warning('Detected unclosed session.')\n\n self._last_session = last_session_completion\n\n last_task_start = self._GetLastStreamNumber('task_start.')\n last_task_completion = self._GetLastStreamNumber('task_completion.')\n\n # TODO: handle open tasks.\n if last_task_start != last_task_completion:\n logging.warning('Detected unclosed task.')\n\n self._last_task = last_task_completion", "def __init__(self, blob=None):\n if blob is None:\n self.versionCode = '0'\n self.data = {}\n else:\n self.versionCode = blob[0]\n encoded = blob[1:]\n compressed = base64.b64decode(encoded)\n self.data = json.loads(zlib.decompress(compressed))", "def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs):\n super(FileComm, self)._init_before_open(**kwargs)\n # Process file class keywords\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n # Put string attributes in the correct format\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))", "def reader(self):\n return self._r", "def __init__(self, in_file=None):\n self.key = 3\n self.code = 'C'\n self.numelem = None\n self.format = None\n self.elems = []\n if in_file is not None:\n self._read(in_file)", "def __init__(self, 
buf=None):\n if buf:\n self.unpack(buf)", "def __init__(self):\n self.buffer = bytearray()", "def read(cls, reader: UFOReader) -> \"Info\":\n self = cls()\n reader.readInfo(self)\n return self", "def read(self):\r\n return RecordIO.Reader.do_read(self._fp, self._codec)", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n logger.debug(\"SlokaReader: Initialize\")\n super(SlokaReader, self).__init__(*args, **kwargs)", "def __init__(self):\n \n # Initialize logger\n self._log = logging.getLogger(\"OemGateway\")\n \n # Initialize variables\n self._data_buffer = []\n self._settings = {}", "def _readtag(self):\n tag = Tag()\n tag.tag = self.reader.readint(1)\n tag.len = self.reader.readint(2)\n\n if tag.len > 0:\n tag.data = self.reader.read(tag.len)\n return tag", "def initialize(self):\n self.lib.Initialize()\n\n self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,\n 'External Exposure': 7, 'External FVB EM': 9,\n 'Software Trigger': 10,\n 'External Charge Shifting': 12}\n self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}\n\n # Initial values\n\n self.readout_packing_state = False\n self.readout_packing = self.readout_packing_state\n\n self.readout_mode_mode = 'Image'\n self.readout_mode = self.readout_mode_mode\n\n self.photon_counting_mode_state = False\n self.photon_counting_mode = self.photon_counting_mode_state\n\n self.frame_transfer_mode_state = False\n self.frame_transfer_mode = self.frame_transfer_mode_state\n\n self.fan_mode_index = 'onfull'\n self.fan_mode = self.fan_mode_index\n\n self.EM_gain_mode_index = 'RealGain'\n self.EM_gain_mode = self.EM_gain_mode_index\n\n self.cooled_on_shutdown_value = False\n self.cooled_on_shutdown = self.cooled_on_shutdown_value\n\n self.baseline_offset_value = 100\n self.baseline_offset = self.baseline_offset_value\n\n self.adv_trigger_mode_state = True\n self.adv_trigger_mode = self.adv_trigger_mode_state\n\n self.acq_mode = 'Single Scan'\n self.acquisition_mode = self.acq_mode\n\n self.amp_typ = 0\n\n self.horiz_shift_speed_index = 0\n self.horiz_shift_speed = self.horiz_shift_speed_index\n\n self.vert_shift_speed_index = 0\n self.vert_shift_speed = self.vert_shift_speed_index\n\n self.preamp_index = 0\n self.preamp = self.preamp_index\n\n self.temperature_sp = 0 * degC\n self.temperature_setpoint = self.temperature_sp\n\n self.auxout = np.zeros(4, dtype=bool)\n for i in np.arange(1, 5):\n self.out_aux_port[i] = False\n\n self.trigger_mode_index = 'Internal'\n self.trigger_mode = self.trigger_mode_index", "def read(self) -> Optional[bytes]:", "def __init__(self, geo_model=None):\n self.rex_bytes = bytearray()\n self.n_bytes = 0\n\n self.data_id = 0\n self.geo_model = geo_model", "def read(self, read):\n\n self._read = read", "def __init__(self, filename, offset):\r\n self.__input__ = open(filename, 'rb')\r\n self.__input__.seek(offset, FROM_START)", "def _init(self):\n raise NotImplementedError", "async def read(self, *, decode: bool = ...) 
-> bytes:\n ...", "def read(self) -> bytes:\n pass", "def deserialize(self, instream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def _read_v1(self):\n return self.usb_dev.read(self.ep_in, self.rdbuf_chunksize, self.interface, self.usb_rd_timeout)", "def read_raw_data(self):\n # Must be set by the user\n raise Exception(\"not implemented\")", "def load(self,ins=None,unpack=False):\n name = self.name\n #--Read, but don't analyze.\n if not unpack:\n self.data = ins.read(self.size,name)\n #--Read and analyze ins.\n elif ins:\n inPos = ins.tell()\n self.loadData(ins)\n ins.seek(inPos,0,name+'_REWIND')\n self.data = ins.read(self.size,name)\n #--Analyze internal buffer.\n else:\n reader = Tes3Reader(self.inName,cStringIO.StringIO(self.data))\n self.loadData(reader)\n reader.close()", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, path, bigendian=False, use_mmap=None):\n self.rd = DictReader(path, bigendian, use_mmap)\n with self.rd as r:\n node_size = r.get_int()\n tind_size = r.get_int()\n tail_size = r.get_int()\n self.num_keys = tind_size\n self.begs = r.get_intarray(tind_size)\n self.base = r.get_intarray(node_size)\n self.lens = r.get_shortarray(tind_size)\n self.chck = r.get_chararray(node_size)\n self.tail = r.get_chararray(tail_size)", "def __init__(self, in_file=None):\n self.key = 2\n self.code = 'C'\n self.numnod = None\n self.format = None\n self.nodes = []\n if in_file is not None:\n self._read(in_file)", "def __init__(self, fd=None):", "def __init__(self, handle, in_read=False, fmt=None, start=None, end=None):\n self._start = start\n self._end = end\n\n if self._start:\n self._start -= 1\n\n if not fmt:\n if not in_read:\n self._get_barcode = _get_barcode[guess_header_format(handle)]\n else:\n self._get_barcode = _get_barcode['unknown']\n else:\n self._get_barcode = _get_barcode[fmt]", "def __init__(self, buff):\n fmt = 'hiSi'\n response = struct_helpers.unpack_from(fmt, buff, 0)\n\n error_code = response[0]\n if error_code != 0:\n self.raise_error(error_code, response)\n self.coordinator_id = response[1]\n self.coordinator_host = response[2]\n self.coordinator_port = response[3]", "def __init__(self, address, ap):\n 
super(ReadRequest, self).__init__(address=address, ap=ap)", "def read_drt(self):\n data = Array('B')\n data = self.read(0, 0, 8)\n num_of_devices = drt_controller.get_number_of_devices(data)\n len_to_read = num_of_devices * 8\n\n data = self.read(0, 0, len_to_read + 8)\n self.drt_manager.set_drt(data)", "def __init__(self, resource, read_header=True, dialect=None, encoding=None,\r\n detect_header=False, sample_size=200, skip_rows=None,\r\n empty_as_null=True,fields=None, **reader_args):\r\n self.read_header = read_header\r\n self.encoding = encoding\r\n self.detect_header = detect_header\r\n self.empty_as_null = empty_as_null\r\n \r\n self.sample_size = sample_size\r\n self.resource = resource\r\n self.reader_args = reader_args\r\n self.reader = None\r\n self.dialect = dialect\r\n \r\n self.close_file = False\r\n self.skip_rows = skip_rows\r\n self.fields = fields", "def run(self):\n self.read_from_serial()" ]
[ "0.6265758", "0.6161929", "0.6149388", "0.60283214", "0.5988332", "0.5961205", "0.5949067", "0.59235144", "0.5830563", "0.575123", "0.5719261", "0.5699496", "0.5671507", "0.5631414", "0.56239974", "0.5621243", "0.5612608", "0.56093657", "0.5547174", "0.5521234", "0.55149037", "0.5507824", "0.5483486", "0.5464175", "0.5464175", "0.54427046", "0.5440683", "0.5439948", "0.541617", "0.5403133", "0.53983516", "0.5396081", "0.53952724", "0.5391794", "0.5365479", "0.53627956", "0.5361399", "0.536136", "0.53526133", "0.5335213", "0.5334665", "0.53319633", "0.5319513", "0.53171915", "0.53169286", "0.5304742", "0.5301851", "0.52874124", "0.5285689", "0.5282391", "0.5280635", "0.5260751", "0.5260751", "0.5258697", "0.525753", "0.52561957", "0.52554107", "0.5252598", "0.52482426", "0.52474135", "0.52424556", "0.5233771", "0.52280074", "0.52196074", "0.52192765", "0.520859", "0.520859", "0.520859", "0.520859", "0.520859", "0.520859", "0.520859", "0.520859", "0.5207709", "0.5202779", "0.5202716", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.5190658", "0.51766807", "0.51660305", "0.51636213", "0.5163162", "0.5160358", "0.5159358", "0.51589483", "0.5156811", "0.5150385" ]
0.0
-1
Extract plastic class label from Image Name and return it
def ExtractLabel(ImgName):
    # Each img has name notation "*****a0X*" where X is PlasticType
    PlasticType = ImgName[7]
    return {
        '1': 0,  # PET
        '2': 1,  # HDPE
        '4': 2,  # LDPE
        '5': 3,  # PP
        '6': 4,  # PS
        '7': 5,  # Other
    }[PlasticType]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name_to_label(self, name):\n\t\t\treturn self.classes[name]", "def name_to_label(self, name):\n\t\treturn self.classes[name]", "def get_imagenet_label(index):\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../resources/imagenet_class_index.json')) as f:\n _CLASS_INDEX = json.load(f)\n return _CLASS_INDEX[str(index)][1]", "def get_label(image, model):\n x = Variable(image, volatile=True)\n label = model(x).data.max(1)[1].numpy()[0]\n # We have string labels for ImageNet\n if isinstance(model, torchvision.models.inception.Inception3):\n label_string = labels.get(label)\n return label_string\n return label", "def name_to_label(self, name):\n return self.classes[name]", "def classify_image(img_pil):\n results = tpu.ClassifyWithImage(img_pil, top_k=1)\n if len(results) == 0:\n return None, None\n i, score = results[0]\n label = labels[i]\n # print(label + \": \" + str(score))\n return label, score", "def ocr_core_names(img):\n text = pytesseract.image_to_string(\n img,\n lang='eng',\n config='--psm 7 --oem 3'\n )\n return text", "def get_classification(self, image):\n # Image pre-processing pipeline\n img = cv2.resize(image, None, fx=0.5, fy=0.5)\n img = img.astype(np.float32)\n img = keras.applications.vgg16.preprocess_input(img)\n # Execute prediction\n probs = self.model.predict(np.array([img]), batch_size=1, verbose=1)[0]\n # get label with max probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n label = self.predictionary[g_x]\n rospy.loginfo(\"label: %d, conf: %f, %f, %f, %f\", g_x, probs[0], probs[1], probs[2], probs[3])\n return label", "def get_label(client, label):\n image_name = get_image_name()\n image = client.images.get(image_name)\n try:\n return image.labels[label]\n except KeyError:\n raise Exception(f\"Image should have a label '{label}'\")", "def overlay_class_names(self, image, predictions):\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels = [self.CATEGORIES[int(i)] for i in labels]\n boxes = predictions.bbox\n\n template = \"{}: {:.2f}\"\n for box, score, label in zip(boxes, scores, labels):\n x, y = box[:2]\n s = template.format(label, score)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n )\n\n return image", "def _get_label(cls, file_name):\n if cls == \"neg\":\n return \"0\"\n else:\n return \"1\"\n # reg = _REGEX_\n # rmtch = reg.match(file_name)\n # if rmtch:\n # return rmtch.groupdict()[\"label\"]\n # else:\n # return \"unknown_positive\"", "def GetImageLabelFromImage(image, parent=None):\n pixmap = GetPixelMapFromImage(image)\n return GetImageLabelFromPixelMap(pixmap, parent=parent)", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def show_class_name(img, pos, class_str, font_scale=0.35):\n\n img = img.astype(np.uint8)\n x0, y0 = int(pos[0]), int(pos[1])\n \n # Compute text size.\n txt = class_str\n font = cv2.FONT_HERSHEY_SIMPLEX\n ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)\n \n # Place text background.\n back_tl = x0, y0 - int(1.3 * txt_h)\n back_br = x0 + txt_w, y0\n cv2.rectangle(img, back_tl, back_br, _GREEN, -1)\n \n # Show text.\n txt_tl = x0, y0 - int(0.3 * txt_h)\n cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)\n return img", "def get_classname(self):\n return 
'ImageFilm'", "def overlay_class_names(image, predictions):\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels_text = [CATEGORIES[i] for i in labels]\n boxes = predictions.bbox\n\n img_h, img_w, _ = image.shape\n\n template = \"{} {}: {:.2f}\"\n abv_map = {\n 'annotation_image': 'am',\n 'annotation_text': 'at',\n 'event_image': 'em',\n 'event_text': 'et',\n 'main_body': 'mb'\n }\n for c_label in DRAW_ORDER:\n for b_id, (box, score, label, label_text) in enumerate(zip(boxes, scores, labels, labels_text)):\n if label != c_label:\n continue\n x, y = box[:2]\n color = CATEGORIES_COLOR[label]\n label_text = abv_map.get(label_text, label_text)\n s = template.format(b_id, label_text, score)\n fontScale = img_w / 2000.0\n (text_width, text_height) = cv2.getTextSize(s, cv2.FONT_HERSHEY_SIMPLEX, fontScale=fontScale, thickness=1)[0]\n box_coords = ((x, y), (x + text_width - 2, y - text_height - 2))\n cv2.rectangle(image, box_coords[0], box_coords[1], color, cv2.FILLED)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 1\n )\n\n return image", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def extract_labels(filename, num_images):\n gt_imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n gt_patches = [img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, 0, False) for i in range(num_images)]\n data = numpy.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = numpy.asarray([value_to_class(numpy.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(numpy.float32)", "def _classify(self, example):\n neighbors = self.find_neighbor(example)\n class_label = self.find_response(neighbors)\n return class_label", "def label_to_class_name(label):\n try:\n genre_label = pd.read_csv(path.join(DATA_PATH, 'genre_labels.csv'))\n return genre_label[genre_label['label'] == int(label)]['genre'].values[\n 0]\n except IOError:\n return label", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. 
{1}\".format(label_text, e)\n return label", "def _parse_classification(self, item):\n full_name = item.css('td[headers=Name]::text').extract_first()\n\n if \"Metra\" in full_name and \"Board Meeting\" in full_name:\n return BOARD\n elif \"Citizens Advisory\" in full_name:\n return ADVISORY_COMMITTEE\n elif \"Committee Meeting\" in full_name:\n return COMMITTEE\n else:\n return NOT_CLASSIFIED", "def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label", "def getImageLabels(bucket, key):\n client = boto3.client('rekognition')\n resp = client.detect_labels(\n Image={\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': key\n }\n }\n )\n\n output = []\n # I'm assuming that we only need the name labels to return to the customer. \n for label in resp['Labels']:\n output.append(label['Name'])\n return output", "def getImageName(self):\r\n return self.imageName", "def get_classification(self, image):\n # Run inference on image\n prediction = None\n prediction = inferOnImage(self.sess, self.model_logits, self.X, image)\n\n # Convert number into label just for debug\n prediction_label = None\n if prediction[0] == 0:\n prediction_label = \"RED\"\n elif prediction[0] == 1:\n prediction_label = \"GREEN\"\n elif prediction[0] == 2:\n prediction_label = \"NOLIGHT\"\n\n # Log the message\n rospy.loginfo(\"The label returned is %s\", prediction_label)\n\n # Return Unknown for now\n return TrafficLight.UNKNOWN", "def extract_labels(filename, num_images, starting_id, context_factor):\n gt_imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n # it means that we base our labels only on the core of the patch, not including the contet added\n context_factor = 0\n gt_patches = [img_crop_context(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor) for i in range(num_images)]\n data = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = np.asarray([value_to_class(np.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(np.float32)", "def _get_img_label(self, path):\n food_items = self.annotations[path]\n tomato_items = [\n item for item in food_items\n if item['id'] in self.tomato_label_ids\n ]\n return 1 if len(tomato_items) > 0 else 0", "def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]", "def _ImageName(self, image):\n\n image_without_protocol = image.split('/')[-1]\n if '@' in image_without_protocol:\n return image_without_protocol.split('@')[0]\n elif ':' in image:\n return image_without_protocol.split(':')[0]\n else:\n return image_without_protocol", "def get_classification(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (360,270), interpolation = cv2.INTER_CUBIC)\n\n with self.graph.as_default():\n result = self.model.predict(image[None, :, :, :], batch_size=1).squeeze()\n id = np.argmax(result)\n return id, result[id]", "def label_image(image):\n \n #Label the blobs using ndimage\n labeled_blobs, n_features = 
ndimage.label(b_image)\n \n #calculate the center of mass of each labelled feature\n centers = ndimage.center_of_mass(b_image, labeled_blobs, np.arange(n_features) + 1)\n \n return labeled_blobs, n_features, centers", "def classify_image(img_path: str, model=None, pretrained_state_path: str = None):\n if model is None:\n if pretrained_state_path is None:\n model = models.vgg16(pretrained=True)\n else:\n state_dict = torch.load(pretrained_state_path)\n model = models.vgg16()\n model.load_state_dict(state_dict)\n img = preprocess_image(img_path)\n output = model(img)\n # Getting the max of the soft max layer.\n prediction = output.data.numpy().argmax()\n return labels[prediction]", "def get_label(filename:str) -> str:\n label = filename.split(\"/\")[-2]\n return label", "def get_classification(self, image):\n\n imrs = cv2.resize(image, (64, 64)) \n imrs = imrs.astype(float)\n imrs = imrs / 255.0\n \n imrs = imrs[newaxis, :, :, :]\n\n with self.graph.as_default():\n preds = self.model.predict(imrs)\n \n predicted_class = np.argmax(preds, axis=1)\n\n choices = {0: TrafficLight.RED,\n 1: TrafficLight.YELLOW,\n 2: TrafficLight.GREEN,\n 3: TrafficLight.UNKNOWN}\n return choices.get(predicted_class[0], TrafficLight.GREEN)", "def get_nickname(image):\r\n extension = len(image.split('/')[-1:][0].split('.')[-1:][0])\r\n return image.split('/')[-1:][0][:-extension-1]", "def predict_car():\n img = open_image(request.files['image'])\n pred_class, pred_idx, outputs = learn.predict(img)\n return str(pred_class)", "def get_labels(fasta_file):\n\t\tbase_name = basename(fasta_file)\n\t\tname = splitext(base_name)[0]\n\t\tlabel = name.split(\"_\")[-1]\n\t\tassert label == \"pos\" or label == \"hard\", \"AssertionError: label {} not found, possible labels pos, hard.\"\n\t\tif label == \"pos\":\n\t\t\treturn \"Toxin\"\n\t\telif label == \"hard\":\n\t\t\treturn \"No_toxin\"", "def _restore_image_name(self, data: Dict[str, str]) -> ImageName:\n return ImageName.parse(data[\"str\"])", "def _target_from_filename(filename):\n # Sample filename: PSI_Tray031_2015-12-26--17-38-25_top.png\n filename_regex = re.search(r'\\d+_([a-z]+).png', str(filename))\n str_label = filename_regex.groups()[0]\n label_dict = {'high': 1.0, 'low': 0.0}\n return label_dict[str_label]", "def get_name():\n return \"SVM\"", "def classifyPhaseImage(fr_nb):\n phase_path = os.path.join(\"..\",'data','microglia','Beacon-1 unst',\"Scene1Interval\"+str(fr_nb)+\"_PHASE.png\")\n \n phase= Image.open(phase_path)\n phase = np.asarray(phase)\n X=phase.reshape(-1,1)\n from sklearn.cluster import KMeans\n kmeans = KMeans(n_clusters=3).fit(X)\n classified = kmeans.labels_\n classified=classified.reshape(phase.shape)\n si2(phase,classified,\"Phase image\",\"Classification\")\n return classified", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", "def get_name():\n return \"SVM Idea\"", "def get_classification(self, image):\n # Image pre-processing pipleine\n img = np.float32(image)\n img = preprocess_input(img)\n img = cv2.resize(img, (299, 299))\n img = np.expand_dims(img, 0)\n # Execute model's predictions - 
return probability value for each of 4 classes\n probs = self.model.predict(img)[0]\n # get class with max probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident about the prediction\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n # Swap label values as model was trained with different label values\n if g_x == 2:\n prediction = 0 # Red\n elif g_x == 0:\n prediction = 2 # Green\n elif g_x == 3:\n prediction = 1 # Yellow\n else:\n prediction = 3 # No light\n\n # Log the message\n rospy.loginfo(\"The label returned is %d\", prediction)\n\n # Return the light state corresponding to the index\n return prediction", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [self.classes[np.argmax(elem)] for elem in self.data]\n return tags", "def bb_labelname(hit):\n try:\n real_name = hit.group(1)\n L = Label.objects.get(name=real_name)\n T = loader.get_template('webview/t/label.html')\n C = Context({ 'L' : L })\n return T.render(C)\n except:\n # This will throw if the requested label is spelt incorrectly, or doesnt exist\n return '<img src=\"/static/transmit.png\" alt=\"Invalid Label\" border=\"0\" /> %s' % (real_name)", "def get_classification(self, image):\n \n img = cv2.resize(src=image, dsize=(IN_IMAGE_HEIGHT,IN_IMAGE_WIDTH))\n img = img.astype(float)\n img = img / 255.0\n\n img = img[np.newaxis,:,:,:]\n\n with self.graph.as_default():\n predictions = self.model.predict(img)\n predicted_cat = np.argmax(predictions,axis=1)\n\n light = predicted_cat[0]\n# rospy.logwarn(\"Predicted = %i \", light)\n if(light==0):\n return TrafficLight.GREEN\n elif(light==1):\n return TrafficLight.YELLOW\n elif(light==2):\n return TrafficLight.RED\n return TrafficLight.UNKNOWN", "def extract_label(selector):\n return selector.split('=')[-1][:-1]", "def get_classification(self, image):\n if self.model is not None:\n im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n im = im.astype('float32')\n im = preprocess_input(im)\n im_array = np.asarray(im)\n transformed_im_array = im_array[None, :, :, :]\n with self.graph.as_default():\n preds = self.model.predict(transformed_im_array, batch_size=1)\n return np.argmax(preds[0])\n return TrafficLight.UNKNOWN", "def image_name(name):\n \n # Gets the '.' 
position\n dot = name.find('.')\n # Slice the name from beginning and before '.'\n img = name[:dot]\n # return string with jpg format\n return \"{}.jpg\".format(img)", "def get_label(cls) -> str:\n return cls._meta.label_lower.split('.')[-1]", "def get_classification(self, image):\n if self.model_type == 'tf':\n return self.run_tf_classifier(image)\n elif self.model_type == 'keras':\n return self.run_keras_classifier(image)\n else:\n return TrafficLight.UNKNOWN", "def get_label_image(probs, img_h, img_w):\n\n labels = probs.argmax(axis=2).astype('uint8')[:img_h, :img_w]\n label_im = Image.fromarray(labels, 'P')\n label_im.putpalette(_PALETTE)\n return label_im", "def create_label(self, loaded_img, loaded_label):\n _, label = cv2.threshold(loaded_label, 120, 255, cv2.THRESH_BINARY)\n kernel = np.ones((5, 5), np.uint8)\n label = cv2.dilate(label, kernel, iterations=1)\n _, contours, _ = cv2.findContours(label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if contours:\n areas = [cv2.contourArea(cnt) for cnt in contours]\n x, y, w, h = cv2.boundingRect(contours[np.argmax(areas)])\n label = label[y:y + h, x:x + w]\n return loaded_img.astype(np.float32) / 255, cv2.resize(label, (self.label_w, self.label_h)).astype(np.float32) / 255\n else:\n return loaded_img.astype(np.float32) / 255, np.zeros([self.label_h, self.label_w], dtype=np.float32)", "def image_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"image_name\")", "def name(self, strippath=False):\n return _image.image_name(self, strippath)", "def labels_to_cityscapes_palette(image):\n classes=ZHANG_classes \n result =np.zeros((img.shape[0], img.shape[1], 3),dtype=np.uint8)\n for key, value in classes.items():\n result[np.where(img == key)] = value\n return result", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [[self.classes[np.argmax(subrow)] for subrow in row] for row in self.data]\n return tags", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def classname(self):\n if not self.has_classname():\n return \"\"\n\n classname_offset = self.unpack_dword(0x30)\n classname_length = self.unpack_word(0x4A)\n\n offset = self.abs_offset_from_hbin_offset(classname_offset)\n d = HBINCell(self._buf, offset, self)\n return struct.unpack_from(str(\"<%ds\") % (classname_length), self._buf, d.data_offset())[0].decode(\"utf-16le\").rstrip(\"\\x00\")", "def get_emotion_label(self, file_name):\n file_name = file_name[:-4]\n emotion_name = file_name.split('_')[-1] # the last is a position of emotion code\n return emotion_name", "def get_name():\n return \"SVMd+ - simplified approach\"", "def classify_image(image):\n image_path = 
image.filename\n image_data = np.array(Image.open(image.stream))\n image_data = skimage.img_as_float(image_data).astype(np.float2)\n with classifier_lock:\n classification = classifier.predict([image_data])[0]\n return {\"suggested_tags\": predicted_tags(classification),\n \"classification_vector\": classification,\n \"image_url\": image_path}", "def __getitem__(self, ind):\n image = Image.open(os.path.join(self.root_dir,self.image_fns[ind]))\n #If a transform is specified, apply it\n if self.transform is not None:\n image = self.transform(image)\n \n # Verify that image is in Tensor format\n #if type(image) is not torch.Tensor:\n # image = transform.ToTensor(image)\n\n # Convert multi-class label into binary encoding \n\n label = self.labels[ind]\n \n # Return the image and its label\n return (image, label)", "def label(image,**kw):\n # default connectivity in OpenCV: 8 (which is equivalent to...)\n # default connectivity in scikit-image: 2\n n, labels = cv2.connectedComponents(image.astype(uint8), connectivity=4)\n #n, labels = cv2.connectedComponentsWithAlgorithm(image.astype(uint8), connectivity=4, ltype=2, ccltype=cv2.CCL_DEFAULT)\n return labels, n-1\n # try: return measurements.label(image,**kw)\n # except: pass\n # types = [\"int32\",\"uint32\",\"int64\",\"uint64\",\"int16\",\"uint16\"]\n # for t in types:\n # try: return measurements.label(array(image,dtype=t),**kw)\n # except: pass\n # # let it raise the same exception as before\n # return measurements.label(image,**kw)", "def __getitem__(self, idx):\n img = self.images[idx]\n label = self.labels[idx].split(\" \")[-1]\n img = Image.open(img)\n img = img.convert('RGB')\n img = self.transform(img)\n return(img, label[:-1])", "def get_class_name(dataset, target):\n if dataset == \"mpeg7\":\n if target == 0: return \"Bone\"\n if target == 1: return \"Comma\"\n if target == 2: return \"Glas\"\n if target == 3: return \"HCircle\"\n if target == 4: return \"Heart\"\n if target == 5: return \"Misk\"\n if target == 6: return \"apple\"\n if target == 7: return \"bat\"\n if target == 8: return \"beetle\"\n if target == 9: return \"bell\"\n if target == 10: return \"bird\"\n if target == 11: return \"bottle\"\n if target == 12: return \"brick\"\n if target == 13: return \"butterfly\"\n if target == 14: return \"camel\"\n if target == 15: return \"car\"\n if target == 16: return \"carriage\"\n if target == 17: return \"cattle\"\n if target == 18: return \"cellular_phone\"\n if target == 19: return \"chicken\"\n if target == 20: return \"children\"\n if target == 21: return \"chopper\"\n if target == 22: return \"classic\"\n if target == 23: return \"crown\"\n if target == 24: return \"cup\"\n if target == 25: return \"deer\"\n if target == 26: return \"device0\"\n if target == 27: return \"device1\"\n if target == 28: return \"device2\"\n if target == 29: return \"device3\"\n if target == 30: return \"device4\"\n if target == 31: return \"device5\"\n if target == 32: return \"device6\"\n if target == 33: return \"device7\"\n if target == 34: return \"device8\"\n if target == 35: return \"device9\"\n if target == 36: return \"dog\"\n if target == 37: return \"elephant\"\n if target == 38: return \"face\"\n if target == 39: return \"fish\"\n if target == 40: return \"flatfish\"\n if target == 41: return \"fly\"\n if target == 42: return \"fork\"\n if target == 43: return \"fountain\"\n if target == 44: return \"frog\"\n if target == 45: return \"guitar\"\n if target == 46: return \"hammer\"\n if target == 47: return \"hat\"\n if target == 48: 
return \"horse\"\n if target == 49: return \"horseshoe\"\n if target == 50: return \"jar\"\n if target == 51: return \"key\"\n if target == 52: return \"lizzard\"\n if target == 53: return \"lmfish\"\n if target == 54: return \"octopus\"\n if target == 55: return \"pencil\"\n if target == 56: return \"personal_car\"\n if target == 57: return \"pocket\"\n if target == 58: return \"rat\"\n if target == 59: return \"ray\"\n if target == 60: return \"sea_snake\"\n if target == 61: return \"shoe\"\n if target == 62: return \"spoon\"\n if target == 63: return \"spring\"\n if target == 64: return \"stef\"\n if target == 65: return \"teddy\"\n if target == 66: return \"tree\"\n if target == 67: return \"truck\"\n if target == 68: return \"turtle\"\n if target == 69: return \"watch\"\n\n if dataset == \"leaf\":\n if target == 0: return \"Acer_Capillipes\"\n if target == 1: return \"Betula_Austrosinensis\"\n if target == 2: return \"Castanea_Sativa\"\n if target == 3: return \"Eucalyptus_Glaucescens\"\n if target == 4: return \"Fagus_Sylvatica\"\n if target == 5: return \"Ginkgo_Biloba\"\n if target == 6: return \"Ilex_Aquifolium\"\n if target == 7: return \"Liquidambar_Styraciflua\"\n if target == 8: return \"Magnolia_Heptapeta\"\n if target == 9: return \"Olea_Europaea\"\n if target == 10: return \"Populus_Adenopoda\"\n if target == 11: return \"Quercus_Afares\"\n if target == 12: return \"Rhododendron_x_Russellianum\"\n if target == 13: return \"Salix_Fragilis\"\n if target == 14: return \"Tilia_Oliveri\"\n if target == 15: return \"Ulmus_Bergmanniana\"\n if target == 16: return \"Viburnum_x_Rhytidophylloides\"\n if target == 17: return \"Zelkova_Serrata\"\n\n if dataset == \"fashion_mnist\":\n if target == 0: return \"Ankle_boot\"\n if target == 1: return \"Bag\"\n if target == 2: return \"Coat\"\n if target == 3: return \"Dress\"\n if target == 4: return \"Pullover\"\n if target == 5: return \"Sandal\"\n if target == 6: return \"Shirt\"\n if target == 7: return \"Sneaker\"\n if target == 8: return \"T_shirt\"\n if target == 9: return \"Trouser\"\n\n if dataset == \"nist\":\n if target == 0: return \"0\"\n if target == 1: return \"1\"\n if target == 2: return \"2\"\n if target == 3: return \"3\"\n if target == 4: return \"4\"\n if target == 5: return \"5\"\n if target == 6: return \"6\"\n if target == 7: return \"7\"\n if target == 8: return \"8\"\n if target == 9: return \"9\"\n if target == 10: return \"A\"\n if target == 11: return \"B\"\n if target == 12: return \"C\"\n if target == 13: return \"D\"\n if target == 14: return \"E\"\n if target == 15: return \"F\"\n if target == 16: return \"G\"\n if target == 17: return \"H\"\n if target == 18: return \"I\"\n if target == 19: return \"J\"\n if target == 20: return \"K\"\n if target == 21: return \"L\"\n if target == 22: return \"M\"\n if target == 23: return \"N\"\n if target == 24: return \"O\"\n if target == 25: return \"P\"\n if target == 26: return \"Q\"\n if target == 27: return \"R\"\n if target == 28: return \"S\"\n if target == 29: return \"T\"\n if target == 30: return \"U\"\n if target == 31: return \"V\"\n if target == 32: return \"W\"\n if target == 33: return \"X\"\n if target == 34: return \"Y\"\n if target == 35: return \"Z\"\n if target == 36: return \"a\"\n if target == 37: return \"b\"\n if target == 38: return \"c\"\n if target == 39: return \"d\"\n if target == 40: return \"e\"\n if target == 41: return \"f\"\n if target == 42: return \"g\"\n if target == 43: return \"h\"\n if target == 44: return \"i\"\n if target == 45: 
return \"j\"\n if target == 46: return \"k\"\n if target == 47: return \"l\"\n if target == 48: return \"m\"\n if target == 49: return \"n\"\n if target == 50: return \"o\"\n if target == 51: return \"p\"\n if target == 52: return \"q\"\n if target == 53: return \"r\"\n if target == 54: return \"s\"\n if target == 55: return \"t\"\n if target == 56: return \"u\"\n if target == 57: return \"v\"\n if target == 58: return \"w\"\n if target == 59: return \"x\"\n if target == 60: return \"y\"\n if target == 61: return \"z\"", "def create_readable_names_for_imagenet_labels():\n\n base_url = 'http://cnbj1-fds.api.xiaomi.net/ml-datasets/imagenet/' # noqa\n synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)\n synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)\n\n filename, _ = urllib.urlretrieve(synset_url)\n synset_list = [s.strip() for s in open(filename).readlines()]\n num_synsets_in_ilsvrc = len(synset_list)\n assert num_synsets_in_ilsvrc == 1000\n\n filename, _ = urllib.urlretrieve(synset_to_human_url)\n synset_to_human_list = open(filename).readlines()\n num_synsets_in_all_imagenet = len(synset_to_human_list)\n assert num_synsets_in_all_imagenet == 21842\n\n synset_to_human = {}\n for s in synset_to_human_list:\n parts = s.strip().split('\\t')\n assert len(parts) == 2\n synset = parts[0]\n human = parts[1]\n synset_to_human[synset] = human\n\n label_index = 1\n labels_to_names = {0: 'background'}\n for synset in synset_list:\n name = synset_to_human[synset]\n labels_to_names[label_index] = name\n label_index += 1\n\n return labels_to_names", "def get_classification(self, image):\n # Light color prediction\n detections = self.run_detection(image)\n boxes, scores, classes = self.filter_boxes(0.6, detections)\n # Scores are ordered highest -> lowest\n if len(classes) > 0:\n if self.label_map[classes[0]] == 'red':\n # rospy.logwarn('Red Light: {}'.format(scores[0]))\n return TrafficLight.RED\n # rospy.logwarn('Proceeding')\n \n return TrafficLight.UNKNOWN", "def get_name():\n return \"SVMd+\"", "def find_name(face):\n if not face[\"MatchedFaces\"]:\n return \"\"\n confidence = face[\"MatchedFaces\"][0][\"Similarity\"]\n if confidence < CONFIDENCE_THRESHOLD:\n return \"\"\n return face[\"MatchedFaces\"][0][\"Face\"][\"ExternalImageId\"]", "def get_classification(self, image):\n #TODO implement light color prediction\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n box = self.detection(image)\n if box == None:\n #rospy.loginfo('Classifier: No box found')\n return TrafficLight.UNKNOWN\n\n left, right, top, bottom = box\n img_crop = image[top:bottom, left:right]\n traffic_light = cv2.resize(img_crop, (32, 32))\n classification = self.classification(traffic_light)\n return classification", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def Name(cls) -> str:\n return 'pixel'", "def _get_label(self):\n return self.label", "def get_class_label(index):\n if isinstance(index,str):\n index = int(index)\n # print(type(index))\n if index < len(class_label):\n return class_label[index]\n basic.outputlogMessage('class index: %d not found in the class list' % index)\n assert (False)\n return False", "def image_name(self) -> str:\n return self._image_name", "def get_label(genotype_type):\n if genotype_type == 
\"Hom\":\n return 0\n elif genotype_type == \"Het\":\n return 1\n elif genotype_type == \"Hom_alt\":\n return 2", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def get_classLabel(self, dataset, class_label): \n\t\tnode = self.root\n\t\tbroken=0\n\t\t\n\t\t#print(\"BEBE:\" + str(node.get_bebe( dataset)))\n\t\t\n\t\tif (node.get_bebe( dataset) == class_label ):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\t\t\tdef junk(data, class_label, seed, ratio):", "def get_detection_class_as_text(self, n: int) -> str:\n return dotty(self.json)[f'predictions.0.detection_classes_as_text.{n}']", "def get_original_label(filename):\n original_label = filename.replace('.tab', '.lbl')\n original_label = original_label.replace('.txt', '.lbl')\n original_label = original_label.replace('.TAB', '.lbl')\n original_label = original_label.replace('.TXT', '.lbl')\n original_label = original_label.replace('rad', 'psv')\n original_label = original_label.replace('RAD', 'PSV')\n return original_label", "def get_classification(self, image):\n #TODO implement light color prediction\n choices = {0: \"GREEN\", 1: \"YELLOW\", 2: \"RED\", 3: \"UNKNOWN\"}\n\n if self.capture_images:\n cv2.imwrite(self.imgPath+str(int(time.clock()*1000))+'.jpg', image)\n print('[TLClassifier] Saved Image ... ')\n\n if self.debug:\n print('[TL Classifier] invoked... 
')\n\n if image.shape != (300, 200, 3):\n print('[TL Classifier] image shape NOK: ' + str(image.shape))\n return \"UNKNOWN shape\"\n \n assert image.shape == (300, 200, 3)\n if self.debug:\n print('[TL Classifier] assertion ok: ')\n\n res = None\n res = cv2.resize(image, (32,32), interpolation = cv2.INTER_CUBIC)\n image = res.reshape(1, 32, 32, 3)\n classification = self.model.predict_classes(image, verbose=0)[0]\n result = choices.get(classification, 'UNKNOWN')\n\n if self.verbose:\n print('[TL Classifier] ' + result + ' detected.')\n\n return result", "def get_classification(self, image):\n if self.classifier is None:\n self.classifier = TrafficLightClassifier('./light_classification/tensor/linux_tensor0.999')\n \n lights = (TrafficLight.RED, TrafficLight.YELLOW, TrafficLight.GREEN)\n return lights[self.classifier.classifyImage(image)]\n \n return TrafficLight.UNKNOWN", "def get_label(urs):\n return assign_term(urs)[1]", "def get_label(self, hierarchy: List[str]) -> Any:", "def name(self) -> str:\n return self.class_names[self.class_num]", "def get_label(self, offset):\n self.ret = idc.GetDisasm(offset).replace(\"extrn \", \"\").split(\":\")[0]\n return self.ret", "def getSlavename():", "def predict_class(self, original_image_numpy: np.ndarray) -> None:\n from app.dl_model.image import ClassifierInput\n # scale up coordinates\n self.scale_up_coordinates()\n x1, y1, x2, y2 = [int(coord) for coord in self.scale_coordinates.round()]\n # crop original numpy image\n numpy_image = original_image_numpy[y1:y2, x1:x2, :].copy()\n # create classifier input object\n classifier_input = ClassifierInput(numpy_image, new_shape=(224, 224))\n # classify input\n prediction = classifier_input.predict_class()\n # set attributes\n self.class_name = prediction.class_name # update class_name\n self.conf = prediction.conf # update probability\n self.product_id = prediction.product_id # set product external id\n self.detection_index = prediction.detection_index # set detection index\n self.top_k_names = prediction.top_k_names # set top k names list\n self.top_k_indices = prediction.top_k_indices # set top k detection index\n self.top_k_confidences = prediction.top_k_confidences # set top k confidieces values\n self.top_k_product_ids = prediction.top_k_product_ids # set top k product external ids", "def fromLabel(name):\n return Data.labels.index(name)", "def get_classification(self, image):\n\tresult = TrafficLight.UNKNOWN\n\n\t\"\"\"Convert the image into the proper format\"\"\"\n#\timage_np = self.load_image_into_numpy_array(image)\n\tself.image_np_expanded = np.expand_dims(image, axis=0)\n\n\t\"\"\"Apply detection\"\"\"\n\t(boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: self.image_np_expanded})\n\n\t\"\"\"And check if a green/Red light has been found\"\"\"\n\tif( num > 0):\n\t #find the highest score in the scores list\n\t max_score_idx = np.squeeze(scores).argmax()\n\t # and get the class going with this score\n\t tf_result = np.squeeze(classes).astype(np.int32)[max_score_idx]\n\t # convert from the TF result to internal format\n\t if( tf_result == 1 ):\n\t\tresult = TrafficLight.GREEN\n\t elif( tf_result == 2 ):\n\t\tresult = TrafficLight.RED\n\t elif( tf_result == 3 ):\n\t\tresult = TrafficLight.YELLOW\n\n\treturn result", "def image_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_name\")", "def image_name(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"image_name\")", "def predict_class(self, image_path):\n\n img_array = self.process_image(image_path)\n predictions = self.model.predict(img_array)\n vehicle = self._mappings[np.argmax(abs(predictions))]\n return vehicle", "def get_name():\n return \"SVM+\"", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name" ]
[ "0.6750595", "0.67307", "0.6638844", "0.6555292", "0.6546279", "0.65367347", "0.64170223", "0.6413202", "0.6403757", "0.6369213", "0.63066596", "0.6250891", "0.6241231", "0.6123709", "0.6118442", "0.61034334", "0.60980034", "0.6039952", "0.6016841", "0.60058093", "0.5924414", "0.59056926", "0.5864966", "0.5863928", "0.5861977", "0.5861145", "0.5860533", "0.58571535", "0.58541787", "0.5839833", "0.58390033", "0.5792985", "0.57929665", "0.5782603", "0.57703495", "0.5768289", "0.57676125", "0.57513183", "0.5744902", "0.5741997", "0.57403904", "0.57143205", "0.5709338", "0.5708936", "0.5706425", "0.5706373", "0.57030636", "0.57023925", "0.5692423", "0.5686754", "0.56774825", "0.5675331", "0.5673088", "0.5638394", "0.56356883", "0.5632707", "0.5629298", "0.56290394", "0.5626605", "0.5622413", "0.56219023", "0.56136036", "0.56104314", "0.56093955", "0.5603056", "0.55962676", "0.5583624", "0.55769646", "0.55766404", "0.5550331", "0.5542061", "0.5541962", "0.5538014", "0.553654", "0.552885", "0.5512706", "0.55124813", "0.55103534", "0.550522", "0.55035913", "0.5494429", "0.5491062", "0.54897845", "0.5488392", "0.5487298", "0.5485232", "0.5480607", "0.54805005", "0.54635096", "0.5462617", "0.545966", "0.545013", "0.5443249", "0.543942", "0.5430758", "0.54279697", "0.54279697", "0.54232824", "0.5419835", "0.54192203" ]
0.7817808
0
Specify where files in our default root are uploaded.
def upload_location(instance, filename):\n    new_id = randint(0, 1000)\n    return "%s/%s" % (new_id, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,self.upload_dir_rel())", "def public_upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,\n self.public_upload_dir_rel())", "def upload_dir_rel(self):\n return os.path.join(self.short_name,\"uploads\")", "def upload_to(self, filename):\n base_path = self.base_upload_to()\n return '%s/%s/%s' % (base_path, 'filemanager', filename)", "def file_root(self):\n return os.path.join(CredentialApplication.FILE_ROOT, self.slug)", "def upload_shared():\n # MARK: default copy to home dir\n put(conf.INS_ARGS['shared_folder'], '~/')", "def public_upload_dir_rel(self):\n return os.path.join(self.short_name,settings.COMIC_PUBLIC_FOLDER_NAME)", "def _get_default_path(self):\n return os.path.join(cfg.ROOT_DIR, 'data', 'KITTI')", "def configure_ext_uploads(app):\n configure_uploads(app, [documents])", "def set_basedir(self, host, path):", "def _init_files_dirs(self):\n self.local.create_files_dirs()\n self.remote.create_files_dirs()", "def set_upload_destination(instance, filename):\n return os.path.join(\"evidence\", str(instance.finding.report.id), filename)", "def project_root_files():\n return [\"parent_workflow.wdl\"]", "def _files_path(self) -> Path:\r\n files_path = self.output_path / \"files\"\r\n os.makedirs(files_path, exist_ok=True)\r\n return files_path", "def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()", "def _get_default_path(self):\n return os.path.join(action_datasets.ROOT_DIR, 'data', 'Actions')", "def get_project_data_folder(self):\n return os.path.join(settings.MEDIA_ROOT,self.short_name)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def localpath(self, *args):\n return os.path.join(os.path.expanduser(self.serverfiles_dir), *args)", "def getDefaultDataSearchPath():\n return FileSearchPath(os.path.dirname(__file__))", "def defaultDirectory(self):\n return self.__defaultDirectory", "def subdir(self):", "def default_data_dir(self):\n return self._default_data_dir", "def default_path():\n return os.path.join(os.environ.get('OVERRIDE_ETC', '/etc'), 'auth')", "def upload():\n global FILE_NAME\n target = os.path.join(APP_ROOT, \"images\")\n print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n FILE_NAME = destination\n file.save(destination)\n return render_template(\"complete.html\")", "def DefaultPath(self) -> str:\n return self.m_def_path", "def additional_files(self):\n path = 
os.path.join(self.path(), 'obb_files')\n os.makedirs(path, exist_ok=True)\n return path", "def uploaded_image_path(filename):\n return '/'.join((app.config['UPLOAD_FOLDER'], filename))", "def base_dir(self):\n pass", "def templates_folder(self):\n return os.path.join(\n os.path.dirname(__file__), \"default_config\", \"divvy_templates\"\n )", "def sendRootListing(self):\n\t\t# Escape the path to allow for files above the current directory.\n\t\tpaths = map(self.rootFileNameToPath, self.files)\n\t\tself.sendListing(self.files, paths)", "def full_path(self):\n return os.path.join(settings.MEDIA_ROOT, self.path)", "def _put_antenny_files_on_device(self):\n self._ensure_directory()\n self._recursive_put_files()", "def radishdir():\n return __RADISH_FILES_DIR__", "def set_root(self, root):\n self.root_path = root", "def get_full_folder_path(self):\n data_dir_path = os.path.join(settings.MEDIA_ROOT,self.folder)\n return data_dir_path", "def root_path(self) -> Path:\n return ARCHIVES_ROOT / self.source_name / self.key", "def root_path(self):\n return os.path.dirname(self.image.path)", "def get_default_file_path(file_name: str) -> str:\n return join(SOURCE_PATH, 'data', file_name)", "def find_default(self, fs_path):\n if os.path.isdir(fs_path):\n default = None\n for name in self.defaults:\n _path = os.path.join(fs_path, name)\n if os.path.isfile(_path):\n default = _path\n break\n if default is None:\n raise Response(403)\n fs_path = default\n return fs_path", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def make_default_dirs(self):\r\n self.defaultconfig()\r\n self.create_needed_dirs()", "def set_local_path(self):\n return HERE", "def user_directory_path(instance, filename: str) -> str:\n\n # File will be uploaded to MEDIA_ROOT/user_<id>/<filename>\n return 'user_{0}/{1}'.format(instance.profile.user.pk, filename)", "def filepaths(self):\n pass", "def __post_init__(self) -> None:\n if self.is_directory and not self.path.endswith('/'):\n self.path += '/'", "def cwd (self, path):\r\n pass", "def path(self):\n return self.get_upload_set().path(self.filename)", "def index_all_files(self, root_dir):\n pass", "def thumbnail_upload_to(self, filename):\n base_path = self.base_upload_to()\n return '%s/%s/%s' % (base_path, 'filemanager_thumbnails', filename)", "def set_root(self):\n config_dir = os.path.expanduser(\"~/.local/shs\")\n config_file = os.path.join(config_dir, \"shs_gui.cfg\")\n # check the file and create one if it's not there\n if not os.path.isfile(config_file):\n os.makedirs(config_dir)\n open(config_file, 'w').close()\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n # if config exists and has needed option\n if config.has_option(\"general\", \"root_dir\"):\n return config.get(\"general\", \"root_dir\")\n # make config\n if not config.has_section(\"general\"):\n config.add_section(\"general\")\n dlg = wx.DirDialog(self, \"Select root directory\")\n if dlg.ShowModal() == wx.ID_OK:\n root_dir = dlg.GetPath()\n config.set(\"general\", \"root_dir\", root_dir)\n else:\n sys.exit(1)\n with open(config_file, 'w') as f:\n config.write(f)\n return root_dir", "def get_upload_path(instance, filename):\n \n userpath = \"{name}/{file}\".format(name=instance.user.username, file=filename)\n mainpath = os.path.join(\"infocomp\",userpath)\n return mainpath", "def __default_pptx_path(self):\n thisdir = os.path.split(__file__)[0]\n return 
os.path.join(thisdir, 'templates', 'default.pptx')", "def setPaths(self):\n self.local_path = g.os_path_join(g.app.loadDir,\"..\",\"plugins\",\"trees\")\n # self.remote_path = r\"cvs.sourceforge.net/viewcvs.py/leo/leo/plugins/trees\"\n self.remote_path = r'leo.tigris.org/source/browse/leo/plugins/trees'", "def get_swagger_static_root():\n return os.path.join(CURDIR, \"static\")", "def root_dir():\r\n return Path(__file__).parent.parent", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'vehicles_dataset_v{}'.format(self._version))", "def base_upload_to(self):\n parent = getattr(self, (self.get_parent()))\n remote = self._meta.get_field(self.get_parent()).remote_field.name\n return '%s/%s' % (self.parent_base_upload_to(parent), remote)", "def getRootDirectory(self):\n if Globals.WORKFLOWS_BASEDIR[0] == '~':\n return os.path.expanduser(Globals.WORKFLOWS_BASEDIR)\n else:\n return os.path.join('', Globals.WORKFLOWS_BASEDIR)", "def base_dir(self, value):\n pass", "def get_upload_path(instance, filename):\n return os.path.join(getattr(settings, \"FILEBROWSER_DIRECTORY\"), str(instance.project.short_name.lower()), filename)", "def set_media_root():\n tmp_path = Path(tempfile.mkdtemp(prefix=\"pytest_\"))\n try:\n with override_settings(MEDIA_ROOT=tmp_path):\n yield\n finally:\n rmtree(tmp_path)", "def set_rootdir(configdict, config_file):\n if 'rootdir' not in configdict or not configdict['rootdir']:\n configdict['rootdir'] = os.path.dirname(config_file)", "def files_distribute(self):\n self._post('files/distribute')", "def test_get_upload_directory(self, mock_config_file):\n configuration = Configuration()\n assert configuration.upload_dir == os.path.join(configuration.app_workdir, '.labmanager', 'upload')", "def base_path(self):\n return self.setup.base_path", "def _get_default_path(self):\n # return os.path.join(datasets.ROOT_DIR, 'data', 'MSRC21')\n # set local path\n return u'/Users/danilonunes/workspace/datasets/msrc21/'", "def __init__(self, root):\n FileHelper.ALL_PATHS = [os.path.join(dp, f) for dp, dn, filenames in os.walk(root) for f in filenames if os.path.splitext(f)[1] in Enums.App.VALID_FILE_TYPES]", "def get_root_filename(self):\n pass", "def _get_psf_filepath(self):\n\t\treturn os.path.join(self.workdir, \"default.psf\")", "def __get_files(self):\n if len(self.files) == 0:\n self.files = os.listdir(self.__path())\n self.files.sort()\n if self.parent:\n self.files.insert(0, \"..\")\n for index, name in enumerate(self.files, start=1):\n if self.__is_dir(self.__make_path(name)):\n self.files[index] = name + \"/\"", "def get_absolute_pathname(self):\n return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())", "def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')", "def uploadFiles(self, filenames):\n bucket = self._S3_USER_UPLOAD_BUCKET\n prefix = self._S3_USER_UPLOAD_DIR\n uuid_dir = uuid.uuid4()\n # TODO(aimee): This should upload to a user-namespaced directory\n for filename in filenames:\n basename = os.path.basename(filename)\n response = self._upload_s3(filename, bucket, f\"{prefix}/{uuid_dir}/{basename}\")\n return f\"Upload file subdirectory: {uuid_dir} (keep a record of this if you want to share these files with other users)\"", "def __init__(self, file_root):\n self.root = file_root", "def image_upload_path(instance, filename):\n return \"adverts/{}/{}\".format(instance.uuid, filename)", "def __init__(self, rootPath=None):\n self.rootPath = rootPath or '.'", "def test_default_path(self):\n 
options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"data-path\"], FilePath(b\"/var/lib/flocker\"))", "def get_attachment_upload_dir(instance, filename):\n return f\"{tasks}/{attachments}/{str(instance.task.id)}/{filename}\"", "def Directory(self) -> str:", "def PopulateFilePaths(self):\n if os.path.isdir(self.backupFolder) == True:\n s3Log.info(\"BackUp Folder = {}\".format(self.backupFolder))\n backUpFilestoTransfer = (os.listdir(self.backupFolder))\n for eachfilename in backUpFilestoTransfer:\n path = os.path.join(self.backupFolder, eachfilename)\n filedictionary={\n \"filename\": eachfilename,\n \"filepath\": path,\n \"uploadedSuccess\": 0\n }\n self.fileTobeUploaded.append(filedictionary)\n\n s3Log.info(\"{} files are to be uploaded. \".format(len(self.fileTobeUploaded) ))\n pprint.pprint(self.fileTobeUploaded)", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def _abspath(filename):\r\n if os.path.isabs(filename):\r\n return filename\r\n return os.path.join(settings.MEDIA_ROOT, filename)", "def path(self, root_dir):\r\n path = os.path.realpath(root_dir)\r\n if not os.path.exists(path):\r\n raise ValueError('Build root does not exist: %s' % root_dir)\r\n self._root_dir = path", "def uploaded_file(filename):\n return send_from_directory('/static/images/uploads/', filename)", "def upload():\r\n\r\n if not os.path.isdir(TO_SEGMENT):\r\n os.mkdir(TO_SEGMENT)\r\n else:\r\n print(\"could not create upload directory: {}\".format(TO_SEGMENT))\r\n print(request.files.getlist(\"file\"))\r\n\r\n for upload in request.files.getlist(\"file\"):\r\n filename = upload.filename\r\n destination = \"/\".join([TO_SEGMENT, filename])\r\n upload.save(destination)\r\n\r\n return redirect(url_for('get_gallery'))", "def mog_param_dir():\n return os.path.join(os.path.dirname(__file__),\n '../../','etc')", "def setNfsRoot(self):\n\t\tself.nfsroot = self.settings.getKeyValue('nfs.root')\n\t\treturn None", "def fresh_media_root(**kwargs):\n with TemporaryDirectory(**kwargs) as media_root:\n with override_settings(MEDIA_ROOT=media_root):\n yield", "def create_upload_path(self, update=False):\n path = f\"{self.root_dir}/{self.current_prod}\"\n file_name = f\"{self.event_date_str}_{self.event_id}.npy\"\n\n if update:\n self.upload_path = path\n self.file_name = file_name\n\n return path, file_name", "def get_upload_path(self):\n location = self.get_storage().location\n return self.cleaned_data['key_name'][len(location):]", "def default_module_dir(self):\n return os.path.dirname(self._modules['default'].path)", "def _get_default_path(self):\n\n raise NotImplementedError()", "def update_filepath(image):\n if image == \"none\":\n image = \"\"\n else:\n image = '/static/uploads/' + image\n return image", "def root_dir():\n return dirname(dirname(__file__))", "def workDir(self):\n self.debug.printHeader()\n #if hasattr(self.settings, \"workDir\"): toret=self.settings.workDir # 025 todo 143\n if self.settings.config.has_section(\"files\") and self.settings.config.has_option(\"files\",\"workDir\"):\n # toret=self.settings.get(\"files\",\"workDir\") 025\n toret=self.settings.workDir\n else: toret=os.environ['HOME']+'/xxz'\n # Also could write workdir back to settings.\n return toret", "def share_directory(self):\n # Get the user to share file/folder with.\n share_user 
= User.query.filter_by(email = self.email.data).first()\n if not share_user:\n return\n\n # The source to copy to another user.\n filename = os.listdir(self.path.data)[int(self.index.data)]\n src = os.path.join(self.path.data, filename)\n # Get home path for the user to share folder with.\n dst = os.path.join(share_user.get_files_path(), filename)\n # Copy source to destination.\n copytree(src, dst)", "def upload(self, source, dest):\n if os.path.isdir(source):\n self.upload_dir(source, dest)\n else:\n self.upload_file(source, dest)", "def cwd(self):" ]
[ "0.6755884", "0.67076856", "0.6568125", "0.6368969", "0.62236637", "0.61067545", "0.6066317", "0.6020004", "0.5959959", "0.59437656", "0.5911991", "0.58240676", "0.5714736", "0.57077825", "0.5693331", "0.5683937", "0.5659573", "0.5636132", "0.56256604", "0.56158495", "0.5602003", "0.55920255", "0.55835116", "0.5568021", "0.55421644", "0.5527479", "0.5519173", "0.5508685", "0.5504101", "0.5490817", "0.5484158", "0.5479387", "0.5478391", "0.54529727", "0.54310274", "0.54275006", "0.5426266", "0.54240495", "0.5409397", "0.5405468", "0.5404147", "0.5399965", "0.5399965", "0.53921825", "0.53905725", "0.5382851", "0.5377016", "0.53704023", "0.5358107", "0.5335689", "0.53278595", "0.5326075", "0.531962", "0.53177905", "0.53086567", "0.5303613", "0.53014433", "0.52924997", "0.528891", "0.5284838", "0.52814037", "0.52779895", "0.52743816", "0.52631235", "0.52613693", "0.52610517", "0.5255176", "0.525516", "0.5246837", "0.52408314", "0.52285576", "0.5226732", "0.5212047", "0.5210766", "0.5194046", "0.51895183", "0.51838195", "0.5182333", "0.51822084", "0.51778865", "0.5173515", "0.51698226", "0.5169441", "0.51679695", "0.51661885", "0.51655483", "0.5160284", "0.5149701", "0.51495224", "0.5147817", "0.5146539", "0.51394194", "0.513725", "0.5136348", "0.51280224", "0.5124411", "0.5122602", "0.5121911", "0.511201", "0.5109926", "0.5103213" ]
0.0
-1